
 o Introduce an argument, VM_ALLOC_WIRED, that requests vm_page_alloc()
   to return a wired page.
 o Use VM_ALLOC_WIRED within Alpha's pmap_growkernel().  Also, because
   Alpha's pmap_growkernel() calls vm_page_alloc() from within a critical
   section, specify VM_ALLOC_INTERRUPT instead of VM_ALLOC_SYSTEM.  (Only
   VM_ALLOC_INTERRUPT is implemented entirely with a spin mutex.)
 o Assert that the page queues mutex is held in vm_page_wire()
   on Alpha, just like the other platforms.
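
Taken together, the first two items replace the old two-step "allocate,
then lock the page queues and wire" pattern with a single allocation
call.  A minimal caller sketch of the new interface (illustrative only:
kptobj and pindex stand in for the caller's own object and index, and
error handling is reduced to a panic as in pmap_growkernel()):

	#include <vm/vm.h>
	#include <vm/vm_page.h>

	vm_page_t nkpg;

	/*
	 * Request a page that comes back already wired: vm_page_alloc()
	 * bumps cnt.v_wire_count and sets wire_count to 1 itself, so the
	 * caller needs neither a separate vm_page_wire() call nor the
	 * page queues lock.  VM_ALLOC_INTERRUPT keeps the allocation
	 * safe from within a critical section.
	 */
	nkpg = vm_page_alloc(kptobj, pindex,
	    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
	if (nkpg == NULL)
		panic("could not allocate a wired page");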
Alan Cox 2002-07-18 04:08:10 +00:00
commit 827b2fa091
parent a9aa1a970f
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=100276
3 changed files with 19 additions and 15 deletions

sys/alpha/alpha/pmap.c

@@ -1561,14 +1561,14 @@ pmap_growkernel(vm_offset_t addr)
 		if (!pmap_pte_v(pte)) {
 			int pindex = NKLEV3MAPS + pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I;
-			nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_SYSTEM);
+			nkpg = vm_page_alloc(kptobj, pindex,
+			    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 			if (!nkpg)
 				panic("pmap_growkernel: no memory to grow kernel");
 			printf("pmap_growkernel: growing to %lx\n", addr);
 			printf("pmap_growkernel: adding new level2 page table\n");
 			nklev2++;
-			vm_page_wire(nkpg);
 			pmap_zero_page(nkpg);
 			pa = VM_PAGE_TO_PHYS(nkpg);
@@ -1596,13 +1596,12 @@ pmap_growkernel(vm_offset_t addr)
 			/*
 			 * This index is bogus, but out of the way
 			 */
-			nkpg = vm_page_alloc(kptobj, nklev3, VM_ALLOC_SYSTEM);
+			nkpg = vm_page_alloc(kptobj, nklev3,
+			    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 			if (!nkpg)
 				panic("pmap_growkernel: no memory to grow kernel");
 			nklev3++;
-			vm_page_wire(nkpg);
 			pmap_zero_page(nkpg);
 			pa = VM_PAGE_TO_PHYS(nkpg);
 			newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE;

sys/vm/vm_page.c

@@ -834,19 +834,17 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
  *	the page cache in this case.
  */
 vm_page_t
-vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
+vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
 	vm_page_t m = NULL;
-	boolean_t prefer_zero;
-	int s;
+	int page_req, s;
 
 	GIANT_REQUIRED;
 
 	KASSERT(!vm_page_lookup(object, pindex),
 		("vm_page_alloc: page already allocated"));
 
-	prefer_zero = (page_req & VM_ALLOC_ZERO) != 0 ? TRUE : FALSE;
-	page_req &= ~VM_ALLOC_ZERO;
+	page_req = req & VM_ALLOC_CLASS_MASK;
 
 	/*
 	 * The pager is allowed to eat deeper into the free page list.
/* /*
* The pager is allowed to eat deeper into the free page list. * The pager is allowed to eat deeper into the free page list.
@@ -863,7 +861,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 		 * Allocate from the free queue if there are plenty of pages
 		 * in it.
 		 */
-		m = vm_page_select_free(object, pindex, prefer_zero);
+		m = vm_page_select_free(object, pindex,
+		    (req & VM_ALLOC_ZERO) != 0);
 	} else if (
 	    (page_req == VM_ALLOC_SYSTEM &&
 	     cnt.v_cache_count == 0 &&
@@ -934,7 +933,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 	} else {
 		m->flags = PG_BUSY;
 	}
-	m->wire_count = 0;
+	if (req & VM_ALLOC_WIRED) {
+		cnt.v_wire_count++;
+		m->wire_count = 1;
+	} else
+		m->wire_count = 0;
 	m->hold_count = 0;
 	m->act_count = 0;
 	m->busy = 0;
@@ -1241,9 +1244,7 @@ vm_page_wire(vm_page_t m)
 	 * it is already off the queues).
 	 */
 	s = splvm();
-#ifndef __alpha__
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-#endif
 	if (m->wire_count == 0) {
 		if ((m->flags & PG_UNMANAGED) == 0)
 			vm_pageq_remove(m);
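
With the #ifndef __alpha__ guard gone, the mtx_assert() now applies on
every platform, so any code that wires an already-allocated page must
hold the page queues mutex around the call.  A sketch of that caller
pattern (assuming the vm_page_lock_queues()/vm_page_unlock_queues()
wrappers around vm_page_queue_mtx):

	vm_page_lock_queues();		/* satisfies the mtx_assert() above */
	vm_page_wire(m);
	vm_page_unlock_queues();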

sys/vm/vm_page.h

@@ -307,11 +307,15 @@ extern struct mtx vm_page_queue_mtx;
 #define VM_PAGE_BITS_ALL 0xffff
 #endif
 
+/* page allocation classes: */
 #define VM_ALLOC_NORMAL		0
 #define VM_ALLOC_INTERRUPT	1
 #define VM_ALLOC_SYSTEM		2
+#define VM_ALLOC_CLASS_MASK	3
+/* page allocation flags: */
+#define VM_ALLOC_WIRED		0x20	/* vm_page_alloc() only */
 #define VM_ALLOC_ZERO		0x40
-#define VM_ALLOC_RETRY		0x80
+#define VM_ALLOC_RETRY		0x80	/* vm_page_grab() only */
 
 void vm_page_flag_set(vm_page_t m, unsigned short bits);
 void vm_page_flag_clear(vm_page_t m, unsigned short bits);
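
Under this layout the low bits select exactly one mutually exclusive
allocation class while the high bits are independent flags, so a request
is built by ORing one class with any combination of flags.  A sketch of
how such a request decomposes (the values on the right are what each
expression yields):

	int req = VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_WIRED;

	int class = req & VM_ALLOC_CLASS_MASK;		/* VM_ALLOC_SYSTEM */
	int zero = (req & VM_ALLOC_ZERO) != 0;		/* prefer a prezeroed page */
	int wired = (req & VM_ALLOC_WIRED) != 0;	/* return the page wired */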