diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 9bc5884fa16e..6c825c96b63f 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -1561,14 +1561,14 @@ pmap_growkernel(vm_offset_t addr)
 		if (!pmap_pte_v(pte)) {
 			int pindex = NKLEV3MAPS +
 				pmap_lev1_index(kernel_vm_end) - K1SEGLEV1I;
-			nkpg = vm_page_alloc(kptobj, pindex, VM_ALLOC_SYSTEM);
+			nkpg = vm_page_alloc(kptobj, pindex,
+			    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 			if (!nkpg)
 				panic("pmap_growkernel: no memory to grow kernel");
 			printf("pmap_growkernel: growing to %lx\n", addr);
 			printf("pmap_growkernel: adding new level2 page table\n");
 			nklev2++;
 
-			vm_page_wire(nkpg);
 			pmap_zero_page(nkpg);
 
 			pa = VM_PAGE_TO_PHYS(nkpg);
@@ -1596,13 +1596,12 @@ pmap_growkernel(vm_offset_t addr)
 		/*
 		 * This index is bogus, but out of the way
 		 */
-		nkpg = vm_page_alloc(kptobj, nklev3, VM_ALLOC_SYSTEM);
+		nkpg = vm_page_alloc(kptobj, nklev3,
+		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 		if (!nkpg)
 			panic("pmap_growkernel: no memory to grow kernel");
 		nklev3++;
-
-		vm_page_wire(nkpg);
 		pmap_zero_page(nkpg);
 
 		pa = VM_PAGE_TO_PHYS(nkpg);
 		newlev2 = pmap_phys_to_pte(pa) | PG_V | PG_ASM | PG_KRE | PG_KWE;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7dd23008ff91..e62e0d15b67a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -834,19 +834,17 @@ vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zer
  *	the page cache in this case.
  */
 vm_page_t
-vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
+vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
 	vm_page_t m = NULL;
-	boolean_t prefer_zero;
-	int s;
+	int page_req, s;
 
 	GIANT_REQUIRED;
 
 	KASSERT(!vm_page_lookup(object, pindex),
 		("vm_page_alloc: page already allocated"));
 
-	prefer_zero = (page_req & VM_ALLOC_ZERO) != 0 ? TRUE : FALSE;
-	page_req &= ~VM_ALLOC_ZERO;
+	page_req = req & VM_ALLOC_CLASS_MASK;
 
 	/*
 	 * The pager is allowed to eat deeper into the free page list.
@@ -863,7 +861,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 		 * Allocate from the free queue if there are plenty of pages
 		 * in it.
 		 */
-		m = vm_page_select_free(object, pindex, prefer_zero);
+		m = vm_page_select_free(object, pindex,
+		    (req & VM_ALLOC_ZERO) != 0);
 	} else if (
 	    (page_req == VM_ALLOC_SYSTEM && 
 	     cnt.v_cache_count == 0 && 
@@ -934,7 +933,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
 	} else {
 		m->flags = PG_BUSY;
 	}
-	m->wire_count = 0;
+	if (req & VM_ALLOC_WIRED) {
+		cnt.v_wire_count++;
+		m->wire_count = 1;
+	} else
+		m->wire_count = 0;
 	m->hold_count = 0;
 	m->act_count = 0;
 	m->busy = 0;
@@ -1241,9 +1244,7 @@ vm_page_wire(vm_page_t m)
 	 * it is already off the queues).
 	 */
 	s = splvm();
-#ifndef	__alpha__
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-#endif
 	if (m->wire_count == 0) {
 		if ((m->flags & PG_UNMANAGED) == 0)
 			vm_pageq_remove(m);
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 17afdb84114a..fe215c0c05b8 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -307,11 +307,15 @@ extern struct mtx vm_page_queue_mtx;
 #define VM_PAGE_BITS_ALL 0xffff
 #endif
 
+/* page allocation classes: */
 #define VM_ALLOC_NORMAL		0
 #define VM_ALLOC_INTERRUPT	1
 #define VM_ALLOC_SYSTEM		2
+#define VM_ALLOC_CLASS_MASK	3
+/* page allocation flags: */
+#define VM_ALLOC_WIRED		0x20	/* vm_page_alloc() only */
 #define VM_ALLOC_ZERO		0x40
-#define VM_ALLOC_RETRY		0x80
+#define VM_ALLOC_RETRY		0x80	/* vm_page_grab() only */
 
 void vm_page_flag_set(vm_page_t m, unsigned short bits);
 void vm_page_flag_clear(vm_page_t m, unsigned short bits);
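
For illustration, a minimal sketch of the calling pattern this change enables,
assuming a caller with an object "kobj" and index "idx" in hand (both
hypothetical names).  Instead of allocating a page and then wiring it with a
separate vm_page_wire() call, the caller passes VM_ALLOC_WIRED so the page
comes back with wire_count already set to 1:

	/*
	 * Hypothetical caller (kobj and idx are placeholder names).
	 * The low bits of the request select the allocation class
	 * (extracted inside vm_page_alloc() with VM_ALLOC_CLASS_MASK);
	 * VM_ALLOC_WIRED and VM_ALLOC_ZERO are or'ed in as flags.
	 */
	vm_page_t m;

	m = vm_page_alloc(kobj, idx,
	    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		panic("vm_page_alloc failed");
	/*
	 * m is returned busy with m->wire_count == 1 and
	 * cnt.v_wire_count already incremented; no separate
	 * vm_page_wire() call is needed.  Note that VM_ALLOC_ZERO only
	 * expresses a preference for a pre-zeroed page; check PG_ZERO
	 * in m->flags before relying on the contents.
	 */

Because the wiring now happens inside vm_page_alloc(), callers such as the
alpha pmap_growkernel() no longer invoke vm_page_wire() without the page
queues lock held, which is why the #ifndef __alpha__ escape hatch around the
mtx_assert() in vm_page_wire() can be removed.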