
merge 194209 into the i386/xen pmap

requested by: alc@
Kip Macy 2010-04-30 03:26:12 +00:00
parent b7402d8269
commit 958d87cd86
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=207419

sys/i386/xen/pmap.c

@@ -289,6 +289,12 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
     "Max number of PV entries");
 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
     "Page share factor per proc");
 
+SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
+    "2/4MB page mapping counters");
+
+static u_long pmap_pde_mappings;
+SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
+    &pmap_pde_mappings, 0, "2/4MB page mappings");
 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
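
For reference (not part of the change): the SYSCTL_NODE/SYSCTL_ULONG pair above publishes the new counter as vm.pmap.pde.mappings, so it can be checked from userland with "sysctl vm.pmap.pde.mappings" or read programmatically. A minimal sketch, assuming only the standard sysctlbyname(3) interface:

/* Illustrative only -- not part of this commit.  Reads the counter the
 * hunk above creates (vm.pmap.pde.mappings) via sysctlbyname(3). */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        u_long mappings;
        size_t len = sizeof(mappings);

        if (sysctlbyname("vm.pmap.pde.mappings", &mappings, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("2/4MB page mappings: %lu\n", mappings);
        return (0);
}
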
@@ -3130,64 +3136,59 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
                     vm_object_t object, vm_pindex_t pindex,
                     vm_size_t size)
 {
+        pd_entry_t *pde;
+        vm_paddr_t pa, ptepa;
         vm_page_t p;
+        int pat_mode;
 
         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
             ("pmap_object_init_pt: non-device object"));
         if (pseflag &&
-            ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
-                int i;
-                vm_page_t m[1];
-                unsigned int ptepindex;
-                int npdes;
-                pd_entry_t ptepa;
-
-                PMAP_LOCK(pmap);
-                if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
-                        goto out;
-                PMAP_UNLOCK(pmap);
-retry:
+            (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
+                if (!vm_object_populate(object, pindex, pindex + atop(size)))
+                        return;
                 p = vm_page_lookup(object, pindex);
-                if (p != NULL) {
-                        if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
-                                goto retry;
-                } else {
-                        p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
-                        if (p == NULL)
-                                return;
-                        m[0] = p;
-
-                        if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
-                                vm_page_lock_queues();
-                                vm_page_free(p);
-                                vm_page_unlock_queues();
-                                return;
-                        }
-
-                        p = vm_page_lookup(object, pindex);
-                        vm_page_wakeup(p);
-                }
-
+                KASSERT(p->valid == VM_PAGE_BITS_ALL,
+                    ("pmap_object_init_pt: invalid page %p", p));
+                pat_mode = p->md.pat_mode;
+                /*
+                 * Abort the mapping if the first page is not physically
+                 * aligned to a 2/4MB page boundary.
+                 */
                 ptepa = VM_PAGE_TO_PHYS(p);
                 if (ptepa & (NBPDR - 1))
                         return;
-
-                p->valid = VM_PAGE_BITS_ALL;
-
-                PMAP_LOCK(pmap);
-                pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
-                npdes = size >> PDRSHIFT;
-                critical_enter();
-                for(i = 0; i < npdes; i++) {
-                        PD_SET_VA(pmap, ptepindex,
-                            ptepa | PG_U | PG_M | PG_RW | PG_V | PG_PS, FALSE);
-                        ptepa += NBPDR;
-                        ptepindex += 1;
+                /*
+                 * Skip the first page. Abort the mapping if the rest of
+                 * the pages are not physically contiguous or have differing
+                 * memory attributes.
+                 */
+                p = TAILQ_NEXT(p, listq);
+                for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
+                    pa += PAGE_SIZE) {
+                        KASSERT(p->valid == VM_PAGE_BITS_ALL,
+                            ("pmap_object_init_pt: invalid page %p", p));
+                        if (pa != VM_PAGE_TO_PHYS(p) ||
+                            pat_mode != p->md.pat_mode)
+                                return;
+                        p = TAILQ_NEXT(p, listq);
                 }
+                /* Map using 2/4MB pages. */
+                PMAP_LOCK(pmap);
+                for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
+                    size; pa += NBPDR) {
+                        pde = pmap_pde(pmap, addr);
+                        if (*pde == 0) {
+                                pde_store(pde, pa | PG_PS | PG_M | PG_A |
+                                    PG_U | PG_RW | PG_V);
+                                pmap->pm_stats.resident_count += NBPDR /
+                                    PAGE_SIZE;
+                                pmap_pde_mappings++;
+                        }
+                        /* Else continue on if the PDE is already valid. */
+                        addr += NBPDR;
+                }
-                pmap_invalidate_all(pmap);
-                critical_exit();
-out:
                 PMAP_UNLOCK(pmap);
         }
 }
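
For scale (illustrative arithmetic only, not from the commit): with 4KB base pages and non-PAE 4MB superpages, PAGE_SHIFT is 12 and PDRSHIFT is 22, so a 4MB-aligned 16MB device object is covered by 4 page-directory entries instead of 4096 page-table entries, which is what the rewritten loop above installs when the pages are physically contiguous and share a PAT mode:

/* Illustrative only -- hypothetical sizes, non-PAE i386 constants. */
#include <stdio.h>

#define PAGE_SHIFT      12              /* 4KB base pages */
#define PDRSHIFT        22              /* 4MB superpages (non-PAE i386) */

int
main(void)
{
        unsigned long size = 16UL << 20;        /* 16MB, 4MB-aligned object */

        printf("4KB PTE mappings: %lu\n", size >> PAGE_SHIFT);  /* 4096 */
        printf("4MB PDE mappings: %lu\n", size >> PDRSHIFT);    /* 4 */
        return (0);
}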