arm64 pmap: Fix a buffer overrun initializing per-superpage locks.

pmap_init_pv_table makes a first pass over the memory segments to
compute the amount of address space needed to allocate per-superpage
locks.  It then makes a second pass over each segment allocating
domain-local memory to back the pages for the locks belonging to each
segment.  This second pass rounds each segment's allocation up to a
page boundary, since the domain-local allocation must be a whole
number of pages.  However, the first pass rounded the total page
count up only once, at the end, and so did not account for the
per-segment padding.  To fix, apply the rounding to each segment in
the first pass instead of just at the end.
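
To see the undercount concretely, here is a standalone userspace sketch
(not FreeBSD code; the 4 KB page size, the 64-byte entry size, and the
segment sizes are made-up stand-ins).  Three segments of 30 entries each
need just under half a page apiece, so the second pass maps three full
pages, while a single rounding of the total reserves only two:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ENTRY_SIZE	64UL	/* stand-in for sizeof(struct pmap_large_md_page) */

/* Round up to a multiple of the page size, like the kernel's round_page(). */
static unsigned long
round_page(unsigned long x)
{
	return ((x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
}

int
main(void)
{
	unsigned long seg_entries[] = { 30, 30, 30 };
	unsigned long total_entries, per_seg_bytes, buggy_bytes;
	int i;

	total_entries = per_seg_bytes = 0;
	for (i = 0; i < 3; i++) {
		total_entries += seg_entries[i];
		/* What the second pass actually maps for this segment. */
		per_seg_bytes += round_page(seg_entries[i] * ENTRY_SIZE);
	}

	/* The buggy first pass rounded only the grand total. */
	buggy_bytes = round_page(total_entries * ENTRY_SIZE);

	/* Prints "needed 12288, reserved 8192": a one-page overrun. */
	printf("needed %lu, reserved %lu\n", per_seg_bytes, buggy_bytes);
	return (0);
}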

While here, tidy the second pass a bit by trimming some
not-quite-right logic copied from amd64.  In particular, compute
'pages' directly at the start of the loop iteration to more closely
match the first loop.  Then, drop an always-false condition: 'end'
was computed as 'start + pages' where 'start == highest + 1', so the
condition actually being tested was 'if (highest >= highest + 1 +
pages)', which can never be true since 'pages' is non-negative.
Finally, remove 'highest' entirely by keeping the result of the
'pvd' increment in the existing loop.
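
After the cleanup, the second pass reduces to roughly the following
shape (a condensed sketch, not the verbatim kernel code; the
page-backing loop and error handling are elided):

pvd = pv_table;
for (i = 0; i < vm_phys_nsegs; i++) {
	seg = &vm_phys_segs[i];
	pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
	    pmap_l2_pindex(seg->start);
	s = round_page(pages * sizeof(*pvd));
	/* ... back s bytes starting at pvd with domain-local pages ... */
	for (j = 0; j < s / sizeof(*pvd); j++) {
		/* ... initialize this entry's PV list lock and head ... */
		pvd++;	/* finishes at the next segment's first entry */
	}
}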

Reported by:	CHERI (overflow)
Reviewed by:	markj
Sponsored by:	DARPA
Differential Revision:	https://reviews.freebsd.org/D38377
John Baldwin 2023-02-13 13:19:03 -08:00
parent 3dfd18a769
commit 18bb97b76b

@@ -1347,7 +1347,6 @@ pmap_init_pv_table(void)
 	struct vm_phys_seg *seg, *next_seg;
 	struct pmap_large_md_page *pvd;
 	vm_size_t s;
-	long start, end, highest, pv_npg;
 	int domain, i, j, pages;
 
 	/*
@@ -1360,14 +1359,13 @@ pmap_init_pv_table(void)
 	/*
 	 * Calculate the size of the array.
 	 */
-	pv_npg = 0;
+	s = 0;
 	for (i = 0; i < vm_phys_nsegs; i++) {
 		seg = &vm_phys_segs[i];
-		pv_npg += pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
+		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
 		    pmap_l2_pindex(seg->start);
+		s += round_page(pages * sizeof(*pvd));
 	}
-	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
-	s = round_page(s);
 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
 	if (pv_table == NULL)
 		panic("%s: kva_alloc failed\n", __func__);
@@ -1376,23 +1374,14 @@ pmap_init_pv_table(void)
 	 * Iterate physical segments to allocate domain-local memory for PV
 	 * list headers.
 	 */
-	highest = -1;
-	s = 0;
+	pvd = pv_table;
 	for (i = 0; i < vm_phys_nsegs; i++) {
 		seg = &vm_phys_segs[i];
-		start = highest + 1;
-		end = start + pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
+		pages = pmap_l2_pindex(roundup2(seg->end, L2_SIZE)) -
 		    pmap_l2_pindex(seg->start);
 		domain = seg->domain;
 
-		if (highest >= end)
-			continue;
-
-		pvd = &pv_table[start];
-		pages = end - start + 1;
-
 		s = round_page(pages * sizeof(*pvd));
-		highest = start + (s / sizeof(*pvd)) - 1;
 		for (j = 0; j < s; j += PAGE_SIZE) {
 			vm_page_t m = vm_page_alloc_noobj_domain(domain,