Push down the responsibility for zeroing a physical page from the
caller to vm_page_grab().

Although this gives VM_ALLOC_ZERO a different meaning for vm_page_grab() than for vm_page_alloc(), I feel such a change is necessary to accomplish other goals. Specifically, I want to make the PG_ZERO flag immutable between the time a page is allocated by vm_page_alloc() and freed by vm_page_free() or vm_page_free_zero(), in order to avoid locking overheads. Once we gave up on the ability to automatically recognize a zeroed page upon entry to vm_page_free(), the ability to mutate the PG_ZERO flag became useless. Instead, I would like to say that "once a page becomes valid, its PG_ZERO flag must be ignored."
commit 7ef6ba5d27
parent 63224f60c3

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=128613
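The pattern this commit removes is visible in every hunk below: each caller passed VM_ALLOC_ZERO to vm_page_grab(), then still had to test PG_ZERO and zero the page by hand. A minimal before-and-after sketch, with "object" and "pindex" standing in for any caller's object and page index:

/*
 * Before: VM_ALLOC_ZERO merely preserved the hint from
 * vm_page_alloc(), so the caller checked PG_ZERO and zeroed
 * the page itself when the flag was clear.
 */
vm_page_t m;

m = vm_page_grab(object, pindex,
    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
if ((m->flags & PG_ZERO) == 0)
	pmap_zero_page(m);

/*
 * After: vm_page_grab() zeroes the page itself whenever
 * VM_ALLOC_ZERO is requested and PG_ZERO is not already set,
 * so the same call now returns zeroed memory unconditionally.
 */
m = vm_page_grab(object, pindex,
    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);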
@@ -1194,8 +1194,6 @@ _pmap_allocpte(pmap, ptepindex)
 	VM_OBJECT_LOCK(pmap->pm_pteobj);
 	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-	if ((m->flags & PG_ZERO) == 0)
-		pmap_zero_page(m);
 
 	KASSERT(m->queue == PQ_NONE,
 	    ("_pmap_allocpte: %p->queue != PQ_NONE", m));
@@ -1242,7 +1240,6 @@ _pmap_allocpte(pmap, ptepindex)
 
 	vm_page_lock_queues();
 	m->valid = VM_PAGE_BITS_ALL;
-	vm_page_flag_clear(m, PG_ZERO);
 	vm_page_wakeup(m);
 	vm_page_unlock_queues();
 	if (!is_object_locked)
@@ -524,8 +524,6 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		VM_OBJECT_UNLOCK(mem->am_obj);
-		if ((m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
 
 		/*
@@ -612,8 +612,6 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
 		m = vm_page_grab(mem->am_obj, 0,
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 		VM_OBJECT_UNLOCK(mem->am_obj);
-		if ((m->flags & PG_ZERO) == 0)
-			pmap_zero_page(m);
 		vm_page_lock_queues();
 		mem->am_physical = VM_PAGE_TO_PHYS(m);
 		vm_page_wakeup(m);
@@ -198,8 +198,6 @@ kmem_alloc(map, size)
 
 		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
 		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-		if ((mem->flags & PG_ZERO) == 0)
-			pmap_zero_page(mem);
 		mem->valid = VM_PAGE_BITS_ALL;
 		vm_page_lock_queues();
 		vm_page_unmanage(mem);
@@ -1460,6 +1460,8 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 			return NULL;
 		goto retrylookup;
 	}
+	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
+		pmap_zero_page(m);
 
 	return m;
 }
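For contrast with the "different meaning" the commit message mentions: vm_page_alloc() keeps VM_ALLOC_ZERO as a preference rather than a guarantee, handing out a pre-zeroed page when one is available and reporting the outcome through PG_ZERO. A sketch of that unchanged caller obligation, assuming the vm_page_alloc() behavior of this era:

/*
 * vm_page_alloc() with VM_ALLOC_ZERO prefers a page from the
 * pre-zeroed queue but may return a dirty one; PG_ZERO tells
 * the caller whether it must still zero the page itself.
 */
vm_page_t m;

m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO);
if (m != NULL && (m->flags & PG_ZERO) == 0)
	pmap_zero_page(m);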