From f919ebde54deef1969167a0390e91f6e772d635f Mon Sep 17 00:00:00 2001
From: David Greenman
Date: Wed, 1 Mar 1995 23:30:04 +0000
Subject: [PATCH] Various changes from John and myself that do the following:

New functions created: vm_object_pip_wakeup() and pagedaemon_wakeup(),
used to reduce the actual number of wakeups.
New function vm_page_protect(), used in conjunction with some new page
flags to reduce the number of calls to pmap_page_protect().
Minor changes to reduce unnecessary spl nesting.
Rewrote vm_page_alloc() to improve readability.
Various other mostly cosmetic changes.
---
 sys/vm/swap_pager.c  |  22 ++---
 sys/vm/vm_fault.c    |  97 +++++++--------------
 sys/vm/vm_map.c      |   3 +-
 sys/vm/vm_object.c   | 116 +++++++++----------------
 sys/vm/vm_object.h   |  12 ++-
 sys/vm/vm_page.c     | 197 ++++++++++++++++++++-----------------------
 sys/vm/vm_page.h     |  28 ++++--
 sys/vm/vm_pageout.c  |  29 ++++---
 sys/vm/vm_pageout.h  |  18 +++-
 sys/vm/vnode_pager.c |   9 +-
 10 files changed, 241 insertions(+), 290 deletions(-)

diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 31896195d3a1..9902969c66b1 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
  *
  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.27 1995/02/22 09:15:20 davidg Exp $
+ * $Id: swap_pager.c,v 1.28 1995/02/25 17:02:48 davidg Exp $
  */
 
 /*
@@ -1040,7 +1040,7 @@ swap_pager_input(swp, m, count, reqpage)
 		if (curproc == pageproc)
 			(void) swap_pager_clean();
 		else
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 		while (swap_pager_free.tqh_first == NULL) {
 			swap_pager_needflags |= SWAP_FREE_NEEDED;
 			if (curproc == pageproc)
@@ -1050,7 +1050,7 @@ swap_pager_input(swp, m, count, reqpage)
 			if (curproc == pageproc)
 				(void) swap_pager_clean();
 			else
-				wakeup((caddr_t) &vm_pages_needed);
+				pagedaemon_wakeup();
 		}
 		splx(s);
 	}
@@ -1146,7 +1146,7 @@ swap_pager_input(swp, m, count, reqpage)
 			wakeup((caddr_t) &swap_pager_free);
 		}
 		if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 		swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
 	} else {
 		/*
@@ -1369,7 +1369,7 @@ swap_pager_output(swp, m, count, flags, rtvals)
 			return VM_PAGER_AGAIN;
 #endif
 		} else
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 		while (swap_pager_free.tqh_first == NULL ||
 		    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
 		    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
@@ -1385,7 +1385,7 @@ swap_pager_output(swp, m, count, flags, rtvals)
 			if (curproc == pageproc)
 				(void) swap_pager_clean();
 			else
-				wakeup((caddr_t) &vm_pages_needed);
+				pagedaemon_wakeup();
 		}
 		splx(s);
 	}
@@ -1544,7 +1544,7 @@ swap_pager_output(swp, m, count, flags, rtvals)
 		wakeup((caddr_t) &swap_pager_free);
 	}
 	if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
-		wakeup((caddr_t) &vm_pages_needed);
+		pagedaemon_wakeup();
 	swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
 	return (rv);
 }
@@ -1592,7 +1592,7 @@ swap_pager_clean()
 			wakeup((caddr_t) &swap_pager_free);
 		}
 		if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 		swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
 		++cleandone;
 		splx(s);
@@ -1691,7 +1691,7 @@ swap_pager_iodone(bp)
 	if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) {
 		swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT;
-		wakeup((caddr_t) &vm_pages_needed);
+		pagedaemon_wakeup();
 	}
 	if (vm_pageout_pages_needed) {
@@ -1700,8 +1700,8 @@
 	}
 	if ((swap_pager_inuse.tqh_first == NULL) ||
 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min &&
-	    nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) {
-		wakeup((caddr_t) &vm_pages_needed);
+	    nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) {
+		pagedaemon_wakeup();
 	}
 	splx(s);
 }
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 87788a7e48e7..9beab2ba79f6 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.18 1995/02/02 09:08:17 davidg Exp $
+ * $Id: vm_fault.c,v 1.19 1995/02/22 09:15:26 davidg Exp $
  */
 
 /*
@@ -152,7 +152,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
 #define	RELEASE_PAGE(m)	{			\
 	PAGE_WAKEUP(m);				\
 	vm_page_lock_queues();			\
-	vm_page_activate(m);			\
+	if ((m->flags & PG_ACTIVE) == 0) vm_page_activate(m);	\
 	vm_page_unlock_queues();		\
 }
@@ -164,22 +164,12 @@
 }
 
 #define	UNLOCK_THINGS	{			\
-	object->paging_in_progress--;		\
-	if ((object->paging_in_progress == 0) &&	\
-	    (object->flags & OBJ_PIPWNT)) {	\
-		object->flags &= ~OBJ_PIPWNT;	\
-		wakeup((caddr_t)object);	\
-	}					\
+	vm_object_pip_wakeup(object);		\
 	vm_object_unlock(object);		\
 	if (object != first_object) {		\
 		vm_object_lock(first_object);	\
 		FREE_PAGE(first_m);		\
-		first_object->paging_in_progress--;	\
-		if ((first_object->paging_in_progress == 0) &&	\
-		    (first_object->flags & OBJ_PIPWNT)) {	\
-			first_object->flags &= ~OBJ_PIPWNT;	\
-			wakeup((caddr_t)first_object);	\
-		}				\
+		vm_object_pip_wakeup(first_object);	\
 		vm_object_unlock(first_object);	\
 	}					\
 	UNLOCK_MAP;				\
@@ -287,21 +277,13 @@ RetryFault:;
 				VM_WAIT;
 				goto RetryFault;
 			}
-			/*
-			 * Remove the page from the pageout daemon's reach
-			 * while we play with it.
-			 */
-
-			vm_page_lock_queues();
-			vm_page_unqueue(m);
-			vm_page_unlock_queues();
 
 			/*
-			 * Mark page busy for other threads.
+			 * Mark page busy for other threads, and the pagedaemon.
 			 */
 			m->flags |= PG_BUSY;
-			if (m->object != kernel_object && m->object != kmem_object &&
-			    m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
+			if (m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
+			    m->object != kernel_object && m->object != kmem_object) {
 				goto readrest;
 			}
 			break;
@@ -446,12 +428,7 @@ RetryFault:;
 			 * object with zeros.
 			 */
 			if (object != first_object) {
-				object->paging_in_progress--;
-				if (object->paging_in_progress == 0 &&
-				    (object->flags & OBJ_PIPWNT)) {
-					object->flags &= ~OBJ_PIPWNT;
-					wakeup((caddr_t) object);
-				}
+				vm_object_pip_wakeup(object);
 				vm_object_unlock(object);
 
 				object = first_object;
@@ -468,12 +445,7 @@ RetryFault:;
 		} else {
 			vm_object_lock(next_object);
 			if (object != first_object) {
-				object->paging_in_progress--;
-				if (object->paging_in_progress == 0 &&
-				    (object->flags & OBJ_PIPWNT)) {
-					object->flags &= ~OBJ_PIPWNT;
-					wakeup((caddr_t) object);
-				}
+				vm_object_pip_wakeup(object);
 			}
 			vm_object_unlock(object);
 			object = next_object;
 		}
 	}
 
-	if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE) != 0) ||
-	    (m->flags & PG_BUSY) == 0)
-		panic("vm_fault: absent or active or inactive or not busy after main loop");
+	if ((m->flags & PG_BUSY) == 0)
+		panic("vm_fault: not busy after main loop");
 
 	/*
 	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
@@ -542,20 +513,16 @@ RetryFault:;
 			vm_page_lock_queues();
-			vm_page_activate(m);
-			pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+			if ((m->flags & PG_ACTIVE) == 0)
+				vm_page_activate(m);
+			vm_page_protect(m, VM_PROT_NONE);
 			vm_page_unlock_queues();
 
 			/*
 			 * We no longer need the old page or object.
 			 */
 			PAGE_WAKEUP(m);
-			object->paging_in_progress--;
-			if (object->paging_in_progress == 0 &&
-			    (object->flags & OBJ_PIPWNT)) {
-				object->flags &= ~OBJ_PIPWNT;
-				wakeup((caddr_t) object);
-			}
+			vm_object_pip_wakeup(object);
 			vm_object_unlock(object);
 
 			/*
@@ -576,12 +543,7 @@
 			 * But we have to play ugly games with
 			 * paging_in_progress to do that...
 			 */
-			object->paging_in_progress--;
-			if (object->paging_in_progress == 0 &&
-			    (object->flags & OBJ_PIPWNT)) {
-				object->flags &= ~OBJ_PIPWNT;
-				wakeup((caddr_t) object);
-			}
+			vm_object_pip_wakeup(object);
 			vm_object_collapse(object);
 			object->paging_in_progress++;
 		} else {
@@ -589,8 +551,6 @@
 			m->flags |= PG_COPYONWRITE;
 		}
 	}
-	if (m->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE))
-		panic("vm_fault: active or inactive before copy object handling");
 
 	/*
 	 * If the page is being written, but hasn't been copied to the
@@ -739,12 +699,13 @@
 			 */
 			vm_page_lock_queues();
-			vm_page_activate(old_m);
+			if ((old_m->flags & PG_ACTIVE) == 0)
+				vm_page_activate(old_m);
 
-			pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
-			    VM_PROT_NONE);
+			vm_page_protect(old_m, VM_PROT_NONE);
 			copy_m->dirty = VM_PAGE_BITS_ALL;
-			vm_page_activate(copy_m);
+			if ((copy_m->flags & PG_ACTIVE) == 0)
+				vm_page_activate(copy_m);
 			vm_page_unlock_queues();
 
 			PAGE_WAKEUP(copy_m);
@@ -760,8 +721,6 @@
 			m->flags &= ~PG_COPYONWRITE;
 		}
 	}
-	if (m->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE))
-		panic("vm_fault: active or inactive before retrying lookup");
 
 	/*
 	 * We must verify that the maps have not changed since our last
@@ -838,9 +797,6 @@
 	 * once in each map for which it is wired.
 	 */
 
-	if (m->flags & (PG_ACTIVE | PG_INACTIVE | PG_CACHE))
-		panic("vm_fault: active or inactive before pmap_enter");
-
 	vm_object_unlock(object);
 
 	/*
@@ -850,6 +806,10 @@
 	 * won't find us (yet).
 	 */
 
+	if (prot & VM_PROT_WRITE)
+		m->flags |= PG_WRITEABLE;
+	m->flags |= PG_MAPPED;
+
 	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
 #if 0
 	if( ((prot & VM_PROT_WRITE) == 0) && change_wiring == 0 && wired == 0)
@@ -868,7 +828,8 @@
 		else
 			vm_page_unwire(m);
 	} else {
-		vm_page_activate(m);
+		if ((m->flags & PG_ACTIVE) == 0)
+			vm_page_activate(m);
 	}
 
 	if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
@@ -1066,6 +1027,8 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
 		vm_object_unlock(src_object);
 		vm_object_unlock(dst_object);
 
+		dst_m->flags |= PG_WRITEABLE;
+		dst_m->flags |= PG_MAPPED;
 		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
 		    prot, FALSE);
@@ -1174,7 +1137,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
 		rahead = ((cnt.v_free_count + cnt.v_cache_count) - 2*cnt.v_free_reserved) / 2;
 		rbehind = rahead;
 		if (!rahead)
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 	}
 	/*
 	 * if we don't have any free pages, then just read one page.
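Note on the vm_fault() hunks above: the new PG_MAPPED/PG_WRITEABLE bits are set
before pmap_enter(), so the flags can only over-state, never under-state, what
the pmap may contain. That ordering is what lets the new vm_page_protect()
(defined in the vm_page.h hunks further down) skip pmap_page_protect() when a
flag is clear. A minimal user-space sketch of the idea, with stub types and a
stand-in for pmap_enter() (everything here is illustrative, not kernel code):

	#include <stdio.h>

	#define PG_WRITEABLE	0x0200	/* values from the vm_page.h hunk below */
	#define PG_MAPPED	0x0400

	struct page { int flags; };

	/* Stand-in for pmap_enter(): would create the hardware mapping. */
	static void
	pmap_enter_model(struct page *m, int write)
	{
		printf("mapped page %p%s\n", (void *)m, write ? " writable" : "");
	}

	static void
	fault_install_model(struct page *m, int write)
	{
		/* Record the mapping hints first, then map, so the hints
		 * are never behind the pmap state. */
		if (write)
			m->flags |= PG_WRITEABLE;
		m->flags |= PG_MAPPED;
		pmap_enter_model(m, write);
	}

	int
	main(void)
	{
		struct page m = { 0 };

		fault_install_model(&m, 1);
		return (0);
	}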
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 04cc88cae088..43cf626bec5c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.14 1995/02/14 04:00:17 davidg Exp $
+ * $Id: vm_map.c,v 1.15 1995/02/21 01:13:05 davidg Exp $
  */
 
 /*
@@ -331,6 +331,7 @@ vm_map_entry_create(map)
 		m->valid = VM_PAGE_BITS_ALL;
 		pmap_enter(vm_map_pmap(kmem_map), mapvm,
 		    VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
+		m->flags |= PG_WRITEABLE|PG_MAPPED;
 
 		entry = (vm_map_entry_t) mapvm;
 		mapvm += NBPG;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 9e71990aa271..cc90a6ca18e2 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.26 1995/02/22 09:15:28 davidg Exp $
+ * $Id: vm_object.c,v 1.27 1995/02/22 10:00:16 davidg Exp $
  */
 
 /*
@@ -306,12 +306,13 @@ vm_object_deallocate(object)
 			vm_object_cache_unlock();
 			return;
 		}
-		/*
-		 * See if this object can persist. If so, enter it in the
-		 * cache, then deactivate all of its pages.
-		 */
-		if (object->flags & OBJ_CANPERSIST) {
+		/*
+		 * See if this object can persist and has some resident
+		 * pages. If so, enter it in the cache.
+		 */
+		if ((object->flags & OBJ_CANPERSIST) &&
+		    (object->resident_page_count != 0)) {
 
 			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
 			    cached_list);
@@ -323,6 +324,7 @@
 			vm_object_cache_trim();
 			return;
 		}
+
 		/*
 		 * Make sure no one can look us up now.
 		 */
@@ -403,8 +405,11 @@ vm_object_terminate(object)
 		vm_page_lock_queues();
 		if (p->flags & PG_CACHE)
 			vm_page_free(p);
-		else
+		else {
+			s = splhigh();
 			vm_page_unqueue(p);
+			splx(s);
+		}
 		vm_page_unlock_queues();
 		p = next;
 	}
@@ -595,7 +600,7 @@ vm_object_pmap_copy(object, start, end)
 	vm_object_lock(object);
 	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
 		if ((start <= p->offset) && (p->offset < end)) {
-			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
+			vm_page_protect(p, VM_PROT_READ);
 			p->flags |= PG_COPYONWRITE;
 		}
 	}
@@ -635,15 +640,11 @@
 				goto again;
 			}
 			splx(s);
-			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+			vm_page_protect(p, VM_PROT_NONE);
 		}
 	}
 	vm_object_unlock(object);
-	--object->paging_in_progress;
-	if (object->paging_in_progress == 0 && (object->flags & OBJ_PIPWNT)) {
-		object->flags &= ~OBJ_PIPWNT;
-		wakeup((caddr_t) object);
-	}
+	vm_object_pip_wakeup(object);
 }
 
 /*
@@ -1036,10 +1037,6 @@ vm_object_qcollapse(object)
 	register vm_size_t size;
 
 	backing_object = object->shadow;
-	if (!backing_object)
-		return;
-	if ((backing_object->flags & OBJ_INTERNAL) == 0)
-		return;
 	if (backing_object->shadow != NULL &&
 	    backing_object->shadow->copy == backing_object)
 		return;
@@ -1060,7 +1057,7 @@
 			p = next;
 			continue;
 		}
-		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+		vm_page_protect(p, VM_PROT_NONE);
 		new_offset = (p->offset - backing_offset);
 		if (p->offset < backing_offset ||
 		    new_offset >= size) {
@@ -1139,32 +1136,27 @@ vm_object_collapse(object)
 		if ((backing_object = object->shadow) == NULL)
 			return;
 
-		if ((object->flags & OBJ_DEAD) || (backing_object->flags & OBJ_DEAD))
+		/*
+		 * we check the backing object first, because it is most likely
+		 * !OBJ_INTERNAL.
+		 */
+		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
+		    (backing_object->flags & OBJ_DEAD) ||
+		    (object->flags & OBJ_INTERNAL) == 0 ||
+		    (object->flags & OBJ_DEAD))
 			return;
 
-		if (object->paging_in_progress != 0) {
-			if (backing_object) {
-				if (vm_object_lock_try(backing_object)) {
-					vm_object_qcollapse(object);
-					vm_object_unlock(backing_object);
-				}
+		if (object->paging_in_progress != 0 ||
+		    backing_object->paging_in_progress != 0) {
+			if (vm_object_lock_try(backing_object)) {
+				vm_object_qcollapse(object);
+				vm_object_unlock(backing_object);
 			}
 			return;
 		}
 
 		vm_object_lock(backing_object);
-		/*
-		 * ... The backing object is not read_only, and no pages in
-		 * the backing object are currently being paged out. The
-		 * backing object is internal.
-		 */
-		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
-		    backing_object->paging_in_progress != 0) {
-			vm_object_qcollapse(object);
-			vm_object_unlock(backing_object);
-			return;
-		}
 		/*
 		 * The backing object can't be a copy-object: the
 		 * shadow_offset for the copy-object must stay as 0.
@@ -1179,16 +1171,7 @@
 			vm_object_unlock(backing_object);
 			return;
 		}
-		/*
-		 * we can deal only with the swap pager
-		 */
-		if ((object->pager &&
-		    object->pager->pg_type != PG_SWAP) ||
-		    (backing_object->pager &&
-		    backing_object->pager->pg_type != PG_SWAP)) {
-			vm_object_unlock(backing_object);
-			return;
-		}
+
 		/*
 		 * We know that we can either collapse the backing object (if
 		 * the parent is the only reference to it) or (perhaps) remove
@@ -1230,7 +1213,7 @@
 				if (p->offset < backing_offset ||
 				    new_offset >= size) {
 					vm_page_lock_queues();
-					pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+					vm_page_protect(p, VM_PROT_NONE);
 					PAGE_WAKEUP(p);
 					vm_page_free(p);
 					vm_page_unlock_queues();
@@ -1239,7 +1222,7 @@
 					if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
 					    object->paging_offset + new_offset))) {
 						vm_page_lock_queues();
-						pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+						vm_page_protect(p, VM_PROT_NONE);
 						PAGE_WAKEUP(p);
 						vm_page_free(p);
 						vm_page_unlock_queues();
@@ -1265,18 +1248,12 @@
 				 * shadow object.
 				 */
 				bopager = backing_object->pager;
-				vm_object_remove(backing_object->pager);
 				backing_object->pager = NULL;
 				swap_pager_copy(
 				    bopager, backing_object->paging_offset,
 				    object->pager, object->paging_offset,
 				    object->shadow_offset);
-				object->paging_in_progress--;
-				if (object->paging_in_progress == 0 &&
-				    (object->flags & OBJ_PIPWNT)) {
-					object->flags &= ~OBJ_PIPWNT;
-					wakeup((caddr_t) object);
-				}
+				vm_object_pip_wakeup(object);
 			} else {
 				object->paging_in_progress++;
 				/*
 				 * grab the shadow objects pager
 				 */
 				object->pager = backing_object->pager;
 				object->paging_offset = backing_object->paging_offset + backing_offset;
-				vm_object_remove(backing_object->pager);
 				backing_object->pager = NULL;
 				/*
 				 * free unnecessary blocks
 				 */
 				swap_pager_freespace(object->pager, 0, object->paging_offset);
-				object->paging_in_progress--;
-				if (object->paging_in_progress == 0 &&
-				    (object->flags & OBJ_PIPWNT)) {
-					object->flags &= ~OBJ_PIPWNT;
-					wakeup((caddr_t) object);
-				}
-			}
-
-			backing_object->paging_in_progress--;
-			if (backing_object->paging_in_progress == 0 &&
-			    (backing_object->flags & OBJ_PIPWNT)) {
-				backing_object->flags &= ~OBJ_PIPWNT;
-				wakeup((caddr_t) backing_object);
+				vm_object_pip_wakeup(object);
 			}
+
+			vm_object_pip_wakeup(backing_object);
 		}
 		/*
 		 * Object now shadows whatever backing_object did.
@@ -1469,7 +1435,7 @@ vm_object_page_remove(object, start, end)
 					goto again;
 				}
 				splx(s);
-				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+				vm_page_protect(p, VM_PROT_NONE);
 				vm_page_lock_queues();
 				PAGE_WAKEUP(p);
 				vm_page_free(p);
@@ -1491,7 +1457,7 @@
 					goto again;
 				}
 				splx(s);
-				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+				vm_page_protect(p, VM_PROT_NONE);
 				vm_page_lock_queues();
 				PAGE_WAKEUP(p);
 				vm_page_free(p);
@@ -1501,11 +1467,7 @@
 			size -= PAGE_SIZE;
 		}
 	}
-	--object->paging_in_progress;
-	if (object->paging_in_progress == 0 && (object->flags & OBJ_PIPWNT)) {
-		object->flags &= ~OBJ_PIPWNT;
-		wakeup((caddr_t) object);
-	}
+	vm_object_pip_wakeup(object);
 }
 
 /*
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 484885dd645b..709732e1c58c 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.h,v 1.7 1995/02/22 09:15:30 davidg Exp $
+ * $Id: vm_object.h,v 1.8 1995/02/22 10:06:43 davidg Exp $
  */
 
 /*
@@ -146,6 +146,16 @@ vm_object_t kmem_object;
 #define	vm_object_lock_try(object)	simple_lock_try(&(object)->Lock)
 #endif
 
+__inline static void
+vm_object_pip_wakeup( vm_object_t object) {
+	object->paging_in_progress--;
+	if ((object->flags & OBJ_PIPWNT) &&
+	    object->paging_in_progress == 0) {
+		object->flags &= ~OBJ_PIPWNT;
+		wakeup((caddr_t) object);
+	}
+}
+
 #ifdef KERNEL
 vm_object_t vm_object_allocate __P((vm_size_t));
 void vm_object_cache_clear __P((void));
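Note on the vm_object.h hunk above: vm_object_pip_wakeup() centralizes the
paging_in_progress release that was previously open-coded at every call site
replaced in this patch, and it only issues a wakeup when a waiter has
announced itself via OBJ_PIPWNT. A self-contained user-space model of the same
logic (stub object type; the OBJ_PIPWNT value here is illustrative, not
necessarily the kernel's):

	#include <stdio.h>

	#define OBJ_PIPWNT	0x0400	/* illustrative flag value */

	struct object_model {
		int paging_in_progress;
		int flags;
	};

	static void
	wakeup(void *chan)	/* stand-in for the kernel's wakeup() */
	{
		printf("wakeup(%p)\n", chan);
	}

	static void
	pip_wakeup_model(struct object_model *object)
	{
		object->paging_in_progress--;
		if ((object->flags & OBJ_PIPWNT) &&
		    object->paging_in_progress == 0) {
			object->flags &= ~OBJ_PIPWNT;
			wakeup(object);
		}
	}

	int
	main(void)
	{
		struct object_model obj = { 2, OBJ_PIPWNT };

		pip_wakeup_model(&obj);	/* 2 -> 1, no wakeup yet */
		pip_wakeup_model(&obj);	/* 1 -> 0, wakeup fires once */
		return (0);
	}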
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c92c21d30115..cdac17cd58e1 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.21 1995/02/22 10:16:21 davidg Exp $
+ * $Id: vm_page.c,v 1.22 1995/02/22 10:27:16 davidg Exp $
  */
 
 /*
@@ -378,17 +378,16 @@ vm_page_hash(object, offset)
  *	Inserts the given mem entry into the object/object-page
  *	table and object list.
  *
- *	The object and page must be locked.
+ *	The object and page must be locked, and must be splhigh.
  */
 
-void
+inline void
 vm_page_insert(mem, object, offset)
 	register vm_page_t mem;
 	register vm_object_t object;
 	register vm_offset_t offset;
 {
 	register struct pglist *bucket;
-	int s;
 
 	VM_PAGE_CHECK(mem);
@@ -407,7 +406,6 @@
 	 */
 
 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
-	s = splhigh();
 	simple_lock(&bucket_lock);
 	TAILQ_INSERT_TAIL(bucket, mem, hashq);
 	simple_unlock(&bucket_lock);
@@ -417,7 +415,6 @@
 	 */
 
 	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
-	(void) splx(s);
 	mem->flags |= PG_TABLED;
 
 	/*
@@ -434,19 +431,17 @@
  *	Removes the given mem entry from the object/offset-page
  *	table and the object page list.
  *
- *	The object and page must be locked.
+ *	The object and page must be locked, and at splhigh.
  */
 
-void
+inline void
 vm_page_remove(mem)
 	register vm_page_t mem;
 {
 	register struct pglist *bucket;
-	int s;
 
 	VM_PAGE_CHECK(mem);
 
-
 	if (!(mem->flags & PG_TABLED))
 		return;
@@ -455,7 +450,6 @@
 	 */
 
 	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
-	s = splhigh();
 	simple_lock(&bucket_lock);
 	TAILQ_REMOVE(bucket, mem, hashq);
 	simple_unlock(&bucket_lock);
@@ -465,7 +459,6 @@
 	 */
 
 	TAILQ_REMOVE(&mem->object->memq, mem, listq);
-	(void) splx(s);
 
 	/*
 	 * And show that the object has one fewer resident page.
@@ -543,60 +536,35 @@ vm_page_rename(mem, new_object, new_offset)
 	vm_page_unlock_queues();
 }
 
-int
+/*
+ * vm_page_unqueue must be called at splhigh();
+ */
+inline void
 vm_page_unqueue(vm_page_t mem)
 {
-	int s, origflags;
+	int origflags;
 
 	origflags = mem->flags;
 
 	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
-		return origflags;
+		return;
 
-	s = splhigh();
-	if (mem->flags & PG_ACTIVE) {
+	if (origflags & PG_ACTIVE) {
 		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
 		cnt.v_active_count--;
 		mem->flags &= ~PG_ACTIVE;
-	} else if (mem->flags & PG_INACTIVE) {
+	} else if (origflags & PG_INACTIVE) {
 		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
 		cnt.v_inactive_count--;
 		mem->flags &= ~PG_INACTIVE;
-	} else if (mem->flags & PG_CACHE) {
+	} else if (origflags & PG_CACHE) {
 		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
 		cnt.v_cache_count--;
 		mem->flags &= ~PG_CACHE;
 		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
-			wakeup((caddr_t) &vm_pages_needed);
+			pagedaemon_wakeup();
 	}
-	splx(s);
-	return origflags;
-}
-
-void
-vm_page_requeue(vm_page_t mem, int flags)
-{
-	int s;
-
-	if (mem->wire_count)
-		return;
-	s = splhigh();
-	if (flags & PG_CACHE) {
-		TAILQ_INSERT_TAIL(&vm_page_queue_cache, mem, pageq);
-		mem->flags |= PG_CACHE;
-		cnt.v_cache_count++;
-	} else if (flags & PG_ACTIVE) {
-		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
-		mem->flags |= PG_ACTIVE;
-		cnt.v_active_count++;
-	} else if (flags & PG_INACTIVE) {
-		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, mem, pageq);
-		mem->flags |= PG_INACTIVE;
-		cnt.v_inactive_count++;
-	}
-	TAILQ_REMOVE(&mem->object->memq, mem, listq);
-	TAILQ_INSERT_TAIL(&mem->object->memq, mem, listq);
-	splx(s);
+	return;
 }
 
 /*
@@ -605,10 +573,10 @@
  *	Allocate and return a memory cell associated
  *	with this VM object/offset pair.
  *
- *	page_req -- 0	normal process request	VM_ALLOC_NORMAL
- *	page_req -- 1	interrupt time request	VM_ALLOC_INTERRUPT
- *	page_req -- 2	system *really* needs a page	VM_ALLOC_SYSTEM
- *			but *cannot* be at interrupt time
+ *	page_req classes:
+ *	VM_ALLOC_NORMAL		normal process request
+ *	VM_ALLOC_SYSTEM		system *really* needs a page
+ *	VM_ALLOC_INTERRUPT	interrupt time request
  *
  *	Object must be locked.
 */
@@ -621,57 +589,73 @@ vm_page_alloc(object, offset, page_req)
 	register vm_page_t mem;
 	int s;
 
+	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
+		page_req = VM_ALLOC_SYSTEM;
+	};
+
 	simple_lock(&vm_page_queue_free_lock);
 
 	s = splhigh();
 
-	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) &&
-	    (page_req == VM_ALLOC_NORMAL) &&
-	    (curproc != pageproc)) {
-		simple_unlock(&vm_page_queue_free_lock);
-		splx(s);
-		return (NULL);
-	}
+	mem = vm_page_queue_free.tqh_first;
 
-	if (page_req == VM_ALLOC_INTERRUPT) {
-		if ((mem = vm_page_queue_free.tqh_first) == 0) {
-			simple_unlock(&vm_page_queue_free_lock);
-			splx(s);
-			/*
-			 * need to wakeup at interrupt time -- it doesn't do VM_WAIT
-			 */
-			wakeup((caddr_t) &vm_pages_needed);
-			return NULL;
-		}
-	} else {
-		if ((cnt.v_free_count < cnt.v_free_reserved) ||
-		    (mem = vm_page_queue_free.tqh_first) == 0) {
+	switch (page_req) {
+	case VM_ALLOC_NORMAL:
+		if (cnt.v_free_count >= cnt.v_free_reserved) {
+			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+			cnt.v_free_count--;
+		} else {
 			mem = vm_page_queue_cache.tqh_first;
-			if (mem) {
+			if (mem != NULL) {
 				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
 				vm_page_remove(mem);
 				cnt.v_cache_count--;
-				goto gotpage;
-			}
-
-			if (page_req == VM_ALLOC_SYSTEM &&
-			    cnt.v_free_count > cnt.v_interrupt_free_min) {
-				mem = vm_page_queue_free.tqh_first;
-			}
-
-			if( !mem) {
+			} else {
 				simple_unlock(&vm_page_queue_free_lock);
 				splx(s);
-				wakeup((caddr_t) &vm_pages_needed);
+				pagedaemon_wakeup();
 				return (NULL);
 			}
 		}
+		break;
+
+	case VM_ALLOC_SYSTEM:
+		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
+		    ((cnt.v_cache_count == 0) &&
+		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
+			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+			cnt.v_free_count--;
+		} else {
+			mem = vm_page_queue_cache.tqh_first;
+			if (mem != NULL) {
+				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
+				vm_page_remove(mem);
+				cnt.v_cache_count--;
+			} else {
+				simple_unlock(&vm_page_queue_free_lock);
+				splx(s);
+				pagedaemon_wakeup();
+				return (NULL);
+			}
+		}
+		break;
+
+	case VM_ALLOC_INTERRUPT:
+		if (mem != NULL) {
+			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
+			cnt.v_free_count--;
+		} else {
+			simple_unlock(&vm_page_queue_free_lock);
+			splx(s);
+			pagedaemon_wakeup();
+			return NULL;
+		}
+		break;
+
+	default:
+		panic("vm_page_alloc: invalid allocation class");
 	}
 
-	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
-	cnt.v_free_count--;
-
-gotpage:
 	simple_unlock(&vm_page_queue_free_lock);
 
 	mem->flags = PG_BUSY;
@@ -688,14 +672,13 @@
 
 	splx(s);
 
-/*
- * don't wakeup too often, so we wakeup the pageout daemon when
- * we would be nearly out of memory.
- */
-	if (curproc != pageproc &&
-	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
-	    (cnt.v_free_count < cnt.v_pageout_free_min))
-		wakeup((caddr_t) &vm_pages_needed);
+	/*
+	 * Don't wakeup too often - wakeup the pageout daemon when
+	 * we would be nearly out of memory.
+	 */
+	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
+	    (cnt.v_free_count < cnt.v_pageout_free_min))
+		pagedaemon_wakeup();
 
 	return (mem);
 }
@@ -792,20 +775,24 @@ vm_page_free(mem)
 	register vm_page_t mem;
 {
 	int s;
+	int flags;
 
 	s = splhigh();
 	vm_page_remove(mem);
 	vm_page_unqueue(mem);
 
-	if (mem->bmapped || mem->busy || mem->flags & PG_BUSY) {
+	flags = mem->flags;
+	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
+		if (flags & PG_FREE)
+			panic("vm_page_free: freeing free page");
 		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
-		    mem->offset, mem->bmapped, mem->busy, (mem->flags & PG_BUSY) ? 1 : 0);
+		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
 		panic("vm_page_free: freeing busy page\n");
 	}
-	if (mem->flags & PG_FREE)
-		panic("vm_page_free: freeing free page");
 
-	if (!(mem->flags & PG_FICTITIOUS)) {
+	if ((flags & PG_WANTED) != 0)
+		wakeup((caddr_t) mem);
+	if ((flags & PG_FICTITIOUS) == 0) {
 		simple_lock(&vm_page_queue_free_lock);
 		if (mem->wire_count) {
@@ -819,7 +806,6 @@
 
 		mem->flags |= PG_FREE;
 		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
-		cnt.v_free_count++;
 		simple_unlock(&vm_page_queue_free_lock);
 		splx(s);
 		/*
@@ -831,6 +817,7 @@
 			vm_pageout_pages_needed = 0;
 		}
 
+		cnt.v_free_count++;
 		/*
 		 * wakeup processes that are waiting on memory if we hit a
 		 * high water mark. And wakeup scheduler process if we have
@@ -843,8 +830,6 @@
 	} else {
 		splx(s);
 	}
-	if (mem->flags & PG_WANTED)
-		wakeup((caddr_t) mem);
 
 	cnt.v_tfree++;
 }
@@ -863,13 +848,15 @@ vm_page_wire(mem)
 	register vm_page_t mem;
 {
 	int s;
-
 	VM_PAGE_CHECK(mem);
 
 	if (mem->wire_count == 0) {
+		s = splhigh();
 		vm_page_unqueue(mem);
+		splx(s);
 		cnt.v_wire_count++;
 	}
+	mem->flags |= PG_WRITEABLE|PG_MAPPED;
 	mem->wire_count++;
 }
@@ -959,7 +946,7 @@ vm_page_cache(m)
 
 	s = splhigh();
 	vm_page_unqueue(m);
-	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+	vm_page_protect(m, VM_PROT_NONE);
 
 	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
 	m->flags |= PG_CACHE;
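Note on the rewritten vm_page_alloc() above: the function now reduces to a
per-class admission check plus a common cache-queue fallback. VM_ALLOC_INTERRUPT
may take the very last free page, VM_ALLOC_SYSTEM may dig below the reserve
when the cache queue is empty, and VM_ALLOC_NORMAL must leave the reserve
intact; every failure path pokes the pageout daemon. A compilable model of just
the admission policy (the counts below are made-up stand-ins for the kernel's
struct vmmeter fields):

	#include <stdio.h>

	/* Illustrative thresholds; the kernel tunes the real ones at boot. */
	static int v_free_count = 3, v_cache_count = 1;
	static int v_free_reserved = 4, v_interrupt_free_min = 2;

	enum { VM_ALLOC_NORMAL, VM_ALLOC_SYSTEM, VM_ALLOC_INTERRUPT };

	/* Returns 1 if the request class may take a page from the free
	 * queue; otherwise the allocator falls back to the cache queue
	 * (NORMAL and SYSTEM) or fails and wakes the pagedaemon. */
	static int
	may_use_free_queue(int page_req)
	{
		switch (page_req) {
		case VM_ALLOC_NORMAL:
			return (v_free_count >= v_free_reserved);
		case VM_ALLOC_SYSTEM:
			return (v_free_count >= v_free_reserved ||
			    (v_cache_count == 0 &&
			     v_free_count >= v_interrupt_free_min));
		case VM_ALLOC_INTERRUPT:
			return (v_free_count > 0);
		}
		return (0);
	}

	int
	main(void)
	{
		printf("normal: %d  system: %d  interrupt: %d\n",
		    may_use_free_queue(VM_ALLOC_NORMAL),
		    may_use_free_queue(VM_ALLOC_SYSTEM),
		    may_use_free_queue(VM_ALLOC_INTERRUPT));
		return (0);
	}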
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 8d9d238600f8..08be85d9052a 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_page.h,v 1.14 1995/02/14 06:10:24 phk Exp $
+ * $Id: vm_page.h,v 1.15 1995/02/20 23:35:43 davidg Exp $
  */
 
 /*
@@ -71,6 +71,7 @@
 #ifndef _VM_PAGE_
 #define _VM_PAGE_
 
+#include
 /*
  * Management of resident (logical) pages.
  *
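Note on the vm_page_protect() inline just above: it turns pmap_page_protect()
into a conditional call, filtered on the PG_MAPPED/PG_WRITEABLE hints that the
rest of this patch maintains. Revoking write access is skipped when the page
was never mapped writable, and a full unmap is skipped when the page was never
mapped at all. The following self-contained model counts the pmap calls to show
the saving; flag values are taken from the hunk above, the struct is a stub:

	#include <stdio.h>

	#define PG_WRITEABLE	0x0200
	#define PG_MAPPED	0x0400
	#define VM_PROT_NONE	0x00
	#define VM_PROT_READ	0x01

	struct page_model { int flags; int pmap_calls; };

	static void
	page_protect_model(struct page_model *m, int prot)
	{
		if (prot == VM_PROT_NONE) {
			if (m->flags & (PG_WRITEABLE | PG_MAPPED)) {
				m->pmap_calls++;	/* pmap_page_protect() */
				m->flags &= ~(PG_WRITEABLE | PG_MAPPED);
			}
		} else if (prot == VM_PROT_READ && (m->flags & PG_WRITEABLE)) {
			m->pmap_calls++;		/* pmap_page_protect() */
			m->flags &= ~PG_WRITEABLE;
		}
	}

	int
	main(void)
	{
		struct page_model m = { PG_MAPPED | PG_WRITEABLE, 0 };

		page_protect_model(&m, VM_PROT_READ);	/* revokes write: 1 call */
		page_protect_model(&m, VM_PROT_READ);	/* no-op: already r/o */
		page_protect_model(&m, VM_PROT_NONE);	/* unmaps: 1 call */
		page_protect_model(&m, VM_PROT_NONE);	/* no-op: not mapped */
		printf("pmap_page_protect calls: %d (vs. 4 unconditional)\n",
		    m.pmap_calls);
		return (0);
	}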
@@ -130,6 +131,9 @@
 #define	PG_TABLED	0x0040		/* page is in VP table (O) */
 #define	PG_COPYONWRITE	0x0080		/* must copy page before changing (O) */
 #define	PG_FICTITIOUS	0x0100		/* physical page doesn't exist (O) */
+#define	PG_WRITEABLE	0x0200		/* page is mapped writeable */
+#define	PG_MAPPED	0x400		/* page is mapped */
+
 #define	PG_DIRTY	0x0800		/* client flag to set when dirty */
 #define	PG_REFERENCED	0x1000		/* page has been referenced */
 #define	PG_CACHE	0x4000		/* On VMIO cache */
@@ -245,18 +249,17 @@
 void vm_page_set_valid __P((vm_page_t, int, int));
 void vm_page_set_invalid __P((vm_page_t, int, int));
 int vm_page_is_valid __P((vm_page_t, int, int));
 void vm_page_test_dirty __P((vm_page_t));
-int vm_page_unqueue __P((vm_page_t ));
+void vm_page_unqueue __P((vm_page_t ));
 int vm_page_bits __P((int, int));
 
-
 /*
  * Keep page from being freed by the page daemon
  * much of the same effect as wiring, except much lower
  * overhead and should be used only for *very* temporary
  * holding ("wiring").
 */
-static __inline void
+static inline void
 vm_page_hold(vm_page_t mem)
 {
 	mem->hold_count++;
@@ -266,7 +269,7 @@
 #include	/* make GCC shut up */
 #endif
 
-static __inline void
+static inline void
 vm_page_unhold(vm_page_t mem)
 {
 #ifdef DIAGNOSTIC
@@ -277,6 +280,21 @@
 #endif
 }
 
+static inline void
+vm_page_protect(vm_page_t mem, int prot)
+{
+	if (prot == VM_PROT_NONE) {
+		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
+			pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+			mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
+		}
+	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
+		pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
+		mem->flags &= ~PG_WRITEABLE;
+	}
+}
+
+
 #endif /* KERNEL */
 
 #define ACT_DECLINE	1
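Note on the vm_pageout() loop change above: the daemon now sleeps only when no
request is pending or memory is already above the wakeup thresholds, and it
consumes the vm_pages_needed flag at splhigh() either way. A request posted
while a scan was in progress therefore triggers an immediate rescan instead of
being lost under the old unconditional tsleep(). A compilable model of just the
sleep gate (threshold values are made up for the demonstration):

	#include <stdio.h>

	static int vm_pages_needed;
	static int v_free_count = 10, v_cache_count = 0;
	static int v_free_reserved = 4, v_free_min = 8;

	/* Returns 1 when the rewritten vm_pageout() loop would go to
	 * sleep rather than scan again. */
	static int
	pageout_would_sleep(void)
	{
		return (!vm_pages_needed ||
		    (v_free_count >= v_free_reserved &&
		     v_free_count + v_cache_count >= v_free_min));
	}

	int
	main(void)
	{
		printf("idle system:    sleep=%d\n", pageout_would_sleep());
		vm_pages_needed = 1;
		v_free_count = 2;
		printf("under pressure: sleep=%d\n", pageout_would_sleep());
		return (0);
	}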
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index a92445917864..19961807f2de 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.38 1995/02/22 10:27:24 davidg Exp $
+ * $Id: vm_pageout.c,v 1.39 1995/02/25 18:39:04 bde Exp $
  */
 
 /*
@@ -205,14 +205,14 @@ vm_pageout_clean(m, sync)
 		 */
 		for (i = 0; i < pageout_count; i++) {
 			ms[i]->flags |= PG_BUSY;
-			pmap_page_protect(VM_PAGE_TO_PHYS(ms[i]), VM_PROT_READ);
+			vm_page_protect(ms[i], VM_PROT_READ);
 		}
 		object->paging_in_progress += pageout_count;
 	} else {
 
 		m->flags |= PG_BUSY;
 
-		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
+		vm_page_protect(m, VM_PROT_READ);
 
 		object->paging_in_progress++;
@@ -290,11 +290,7 @@
 		 * collapse.
 		 */
 		if (pageout_status[i] != VM_PAGER_PEND) {
-			if ((--object->paging_in_progress == 0) &&
-			    (object->flags & OBJ_PIPWNT)) {
-				object->flags &= ~OBJ_PIPWNT;
-				wakeup((caddr_t) object);
-			}
+			vm_object_pip_wakeup(object);
 			if ((ms[i]->flags & (PG_REFERENCED|PG_WANTED)) ||
 			    pmap_is_referenced(VM_PAGE_TO_PHYS(ms[i]))) {
 				pmap_clear_reference(VM_PAGE_TO_PHYS(ms[i]));
@@ -377,8 +373,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
 				if (!p->act_count) {
 					if (!map_remove_only)
 						vm_page_deactivate(p);
-					pmap_page_protect(VM_PAGE_TO_PHYS(p),
-					    VM_PROT_NONE);
+					vm_page_protect(p, VM_PROT_NONE);
 					/*
 					 * else if on the next go-around we
 					 * will deactivate the page we need to
@@ -420,8 +415,7 @@
 				TAILQ_INSERT_TAIL(&object->memq, p, listq);
 			}
 		} else if ((p->flags & (PG_INACTIVE | PG_BUSY)) == PG_INACTIVE) {
-			pmap_page_protect(VM_PAGE_TO_PHYS(p),
-			    VM_PROT_NONE);
+			vm_page_protect(p, VM_PROT_NONE);
 		}
 		vm_page_unlock_queues();
 		p = next;
 	}
@@ -797,7 +791,16 @@ vm_pageout()
 	 * The pageout daemon is never done, so loop forever.
 	 */
 	while (TRUE) {
-		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
+		int s = splhigh();
+
+		if (!vm_pages_needed ||
+		    ((cnt.v_free_count >= cnt.v_free_reserved) &&
+		    (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
+			vm_pages_needed = 0;
+			tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
+		}
+		vm_pages_needed = 0;
+		splx(s);
 		cnt.v_pdwakeups++;
 		vm_pager_sync();
 		vm_pageout_scan();
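Closing note on the vm_pageout.h hunk: pagedaemon_wakeup() and the reworked
vm_wait() form a simple handshake. Callers post at most one pending request
(vm_pages_needed acts as a latch, so the dozens of call sites converted in
this patch collapse redundant wakeups into one), and the daemon clears the
latch when it runs. A self-contained model of the requester side; the kernel
version additionally skips the wakeup when curproc is the pagedaemon itself,
and the tsleep() calls are represented here only by comments:

	#include <stdio.h>

	static int vm_pages_needed;	/* latch: a wakeup is pending */

	static void
	wakeup(void *chan)	/* stand-in for the kernel's wakeup() */
	{
		printf("wakeup(%p)\n", chan);
	}

	/* Model of pagedaemon_wakeup(): post at most one request. */
	static void
	pagedaemon_wakeup_model(void)
	{
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
	}

	/* Model of the ordinary-process path through vm_wait(): nudge
	 * the daemon, then block until memory is freed. */
	static void
	vm_wait_model(void)
	{
		pagedaemon_wakeup_model();
		/* tsleep(&cnt.v_free_count, PVM, "vmwait", 0) follows
		 * in the kernel; vm_page_free() issues the wakeup. */
	}

	int
	main(void)
	{
		vm_wait_model();	/* posts one wakeup */
		vm_wait_model();	/* latch already set: silent */
		vm_pages_needed = 0;	/* the daemon ran and cleared it */
		vm_wait_model();	/* posts another wakeup */
		return (0);
	}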