1) Eliminate unnecessary bzero of UPAGES.
2) Eliminate unnecessary copying of pages during/after forks.
3) Add user map simplification.
John Dyson 1996-03-02 02:54:24 +00:00
parent 33309c7fc0
commit de5f6a7765
8 changed files with 217 additions and 106 deletions

sys/vm/swap_pager.c

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.59 1996/01/19 03:59:41 dyson Exp $
* $Id: swap_pager.c,v 1.60 1996/01/31 13:14:21 davidg Exp $
*/
/*
@@ -179,6 +179,8 @@ static void swap_pager_setvalid __P((vm_object_t object,
vm_offset_t offset, int valid));
static void swapsizecheck __P((void));
#define SWAPLOW (vm_swap_size < (512 * btodb(PAGE_SIZE)))
static inline void
swapsizecheck()
{
@@ -1086,7 +1088,7 @@ swap_pager_getpages(object, m, count, reqpage)
* the clean flag so that the page contents will be
* preserved.
*/
if (swap_pager_full) {
if (SWAPLOW) {
for (i = 0; i < count; i++) {
m[i]->dirty = VM_PAGE_BITS_ALL;
}
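The SWAPLOW test above replaces the old swap_pager_full check: once free swap falls below 512 pages' worth of disk blocks, pages coming back through the swap pager are kept dirty so their contents cannot be quietly discarded. A minimal userspace sketch of the same threshold arithmetic, with PAGE_SIZE, DEV_BSIZE, btodb() and vm_swap_size as assumed stand-ins for the kernel's definitions:

#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed page size */
#define DEV_BSIZE 512			/* assumed disk block size */
#define btodb(bytes) ((bytes) / DEV_BSIZE)

static int vm_swap_size;		/* free swap, in DEV_BSIZE blocks */

/* same expression as the macro added above */
#define SWAPLOW (vm_swap_size < (512 * btodb(PAGE_SIZE)))

int
main(void)
{
	vm_swap_size = 100 * btodb(PAGE_SIZE);	/* 100 pages left */
	printf("swap low: %d\n", SWAPLOW);	/* prints 1 */
	vm_swap_size = 4096 * btodb(PAGE_SIZE);	/* plenty left */
	printf("swap low: %d\n", SWAPLOW);	/* prints 0 */
	return 0;
}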

sys/vm/vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.39 1995/12/11 04:58:06 dyson Exp $
* $Id: vm_fault.c,v 1.40 1996/01/19 03:59:43 dyson Exp $
*/
/*
@@ -103,6 +103,10 @@ int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
int vm_fault_free_1;
int vm_fault_copy_save_1;
int vm_fault_copy_save_2;
/*
* vm_fault:
*
@@ -470,50 +474,140 @@ readrest:
if (fault_type & VM_PROT_WRITE) {
/*
* If we try to collapse first_object at this point,
* we may deadlock when we try to get the lock on an
* intermediate object (since we have the bottom
* object locked). We can't unlock the bottom object,
* because the page we found may move (by collapse) if
* we do.
*
* Instead, we first copy the page. Then, when we have
* no more use for the bottom object, we unlock it and
* try to collapse.
*
* Note that we copy the page even if we didn't need
* to... that's the breaks.
*/
/*
* We already have an empty page in first_object - use
* it.
*/
vm_page_copy(m, first_m);
first_m->valid = VM_PAGE_BITS_ALL;
if (lookup_still_valid &&
/*
* Only one shadow object
*/
(object->shadow_count == 1) &&
/*
* No COW refs, except us
*/
(object->ref_count == 1) &&
/*
* No one else can look this object up
*/
(object->handle == NULL) &&
/*
* No other ways to look the object up
*/
((object->type == OBJT_DEFAULT) ||
(object->type == OBJT_SWAP)) &&
/*
* We don't chase down the shadow chain
*/
(object == first_object->backing_object)) {
/*
* If another map is truly sharing this page with us,
* we have to flush all uses of the original page,
* since we can't distinguish those which want the
* original from those which need the new copy.
*
* XXX If we know that only one map has access to this
* page, then we could avoid the pmap_page_protect()
* call.
*/
/*
* get rid of the unnecessary page
*/
vm_page_protect(first_m, VM_PROT_NONE);
PAGE_WAKEUP(first_m);
vm_page_free(first_m);
/*
* grab the page and put it into the process's object
*/
vm_page_rename(m, first_object, first_pindex);
first_m = m;
m->valid = VM_PAGE_BITS_ALL;
m->dirty = VM_PAGE_BITS_ALL;
m = NULL;
++vm_fault_copy_save_1;
} else {
/*
* Oh, well, let's copy it.
*/
vm_page_copy(m, first_m);
first_m->valid = VM_PAGE_BITS_ALL;
}
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
if (lookup_still_valid &&
/*
* make sure that we have two shadow objs
*/
(object->shadow_count == 2) &&
/*
* And no COW refs -- note that there are sometimes
* temp refs to objs, but ignore that case -- we just
* punt.
*/
(object->ref_count == 2) &&
/*
* No one else can look us up
*/
(object->handle == NULL) &&
/*
* Not something that can be referenced elsewhere
*/
((object->type == OBJT_DEFAULT) ||
(object->type == OBJT_SWAP)) &&
/*
* We don't bother chasing down object chain
*/
(object == first_object->backing_object)) {
vm_object_t other_object;
vm_pindex_t other_pindex, other_pindex_offset;
vm_page_t tm;
other_object = object->shadow_head.tqh_first;
if (other_object == first_object)
other_object = other_object->shadow_list.tqe_next;
if (!other_object)
panic("vm_fault: other object missing");
if (other_object &&
(other_object->type == OBJT_DEFAULT) &&
(other_object->paging_in_progress == 0)) {
other_pindex_offset =
OFF_TO_IDX(other_object->backing_object_offset);
if (pindex >= other_pindex_offset) {
other_pindex = pindex - other_pindex_offset;
/*
* If the other object has the page, just free it.
*/
if ((tm = vm_page_lookup(other_object, other_pindex))) {
if ((tm->flags & PG_BUSY) == 0 &&
tm->busy == 0 &&
tm->valid == VM_PAGE_BITS_ALL) {
/*
* get rid of the unnecessary page
*/
vm_page_protect(m, VM_PROT_NONE);
PAGE_WAKEUP(m);
vm_page_free(m);
m = NULL;
++vm_fault_free_1;
tm->dirty = VM_PAGE_BITS_ALL;
first_m->dirty = VM_PAGE_BITS_ALL;
}
} else {
/*
* If the other object doesn't have the page,
* then we move it there.
*/
vm_page_rename(m, other_object, other_pindex);
m->dirty = VM_PAGE_BITS_ALL;
/* m->valid = VM_PAGE_BITS_ALL; */
++vm_fault_copy_save_2;
}
}
}
}
if (m) {
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
/*
* We no longer need the old page or object.
*/
PAGE_WAKEUP(m);
vm_object_pip_wakeup(object);
PAGE_WAKEUP(m);
}
vm_object_pip_wakeup(object);
/*
* Only use the new page below...
*/
@@ -593,15 +687,6 @@ readrest:
*/
prot &= retry_prot;
}
/*
* (the various bits we're fiddling with here are locked by the
* object's lock)
*/
/*
* It's critically important that a wired-down page be faulted only
* once in each map for which it is wired.
*/
/*
* Put this page into the physical map. We had to do the unlock above
@@ -628,7 +713,7 @@ readrest:
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
if (change_wiring == 0 && wired == 0)
if (vp && change_wiring == 0 && wired == 0)
pmap_prefault(map->pmap, vaddr, entry, first_object);
#endif
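The first new block above is the heart of the commit: when the object holding the copy-on-write source page has exactly one shadow (first_object), a single reference, and no handle, the page is renamed into first_object rather than copied; the second block covers the two-shadow case left behind by fork, where the page can be moved to the other shadow or freed outright. A reduced userspace model of the rename-vs-copy decision follows; every type, field, and function in it is an illustrative stand-in, not the kernel's API:

#include <stdio.h>
#include <string.h>

struct obj;

struct page {
	char		data[64];
	struct obj     *owner;
};

struct obj {
	int		ref_count;	/* outstanding references */
	int		shadow_count;	/* objects shadowing this one */
	int		has_handle;	/* externally nameable? */
	struct page    *pg;
};

/* Resolve a write fault: front shadows backing, which holds the page. */
static void
cow_write_fault(struct obj *front, struct obj *backing, struct page *scratch)
{
	if (backing->ref_count == 1 && backing->shadow_count == 1 &&
	    !backing->has_handle) {
		/* Nobody else can reach the page: rename it, skip the copy. */
		front->pg = backing->pg;
		backing->pg = NULL;
		front->pg->owner = front;
		printf("renamed page into front object (copy avoided)\n");
	} else {
		/* Truly shared: copy into the page allocated up front. */
		memcpy(scratch->data, backing->pg->data, sizeof scratch->data);
		scratch->owner = front;
		front->pg = scratch;
		printf("copied page\n");
	}
}

int
main(void)
{
	struct page src = { "original", NULL }, scratch;
	struct obj backing = { 1, 1, 0, &src };
	struct obj front = { 1, 0, 0, NULL };

	cow_write_fault(&front, &backing, &scratch);	/* renames */
	return 0;
}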

sys/vm/vm_glue.c

@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.39 1996/02/22 10:57:36 davidg Exp $
* $Id: vm_glue.c,v 1.40 1996/02/23 18:49:24 peter Exp $
*/
#include "opt_ddb.h"
@@ -271,7 +271,8 @@ vm_fork(p1, p2)
for(i=0;i<UPAGES;i++) {
vm_page_t m;
while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj, i, VM_ALLOC_ZERO)) == NULL) {
while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj,
i, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
}
@@ -281,13 +282,12 @@ vm_fork(p1, p2)
VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, 1);
pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
VM_PAGE_TO_PHYS(m));
if ((m->flags & PG_ZERO) == 0)
bzero(((caddr_t) up) + i * PAGE_SIZE, PAGE_SIZE);
m->flags &= ~PG_ZERO;
m->valid = VM_PAGE_BITS_ALL;
}
vm_page_unhold(stkm);
p2->p_addr = up;
/*

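This hunk is the bzero elimination from the commit message: the UPAGES are now allocated with VM_ALLOC_NORMAL, and cleared by hand only when the allocator did not happen to hand back a page already marked PG_ZERO. A userspace sketch of the pattern, with the flag and the allocator as assumed stand-ins for the kernel's:

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define PG_ZERO   0x01			/* page known to be all zeroes */

struct page {
	int	flags;
	char	data[PAGE_SIZE];
};

/* Stand-in allocator: sometimes returns an already-zeroed page. */
static struct page *
page_alloc(void)
{
	struct page *m = calloc(1, sizeof(*m));

	if (m != NULL && (rand() & 1))
		m->flags |= PG_ZERO;	/* pretend it came off a zero queue */
	return m;
}

int
main(void)
{
	struct page *m = page_alloc();

	if (m == NULL)
		return 1;
	if ((m->flags & PG_ZERO) == 0)		/* zero only when needed */
		memset(m->data, 0, PAGE_SIZE);
	m->flags &= ~PG_ZERO;			/* contents about to change */
	free(m);
	return 0;
}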
sys/vm/vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.32 1996/01/19 03:59:52 dyson Exp $
* $Id: vm_map.c,v 1.33 1996/02/11 22:03:49 dyson Exp $
*/
/*
@@ -824,7 +824,6 @@ vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
return (result);
}
#ifdef notyet
/*
* vm_map_simplify_entry: [ internal use only ]
*
@@ -837,9 +836,8 @@ vm_map_simplify_entry(map, entry)
vm_map_t map;
vm_map_entry_t entry;
{
#ifdef lint
map++;
#endif
vm_map_entry_t prev, next;
vm_size_t prevsize, nextsize, esize;
/*
* If this entry corresponds to a sharing map, then see if we can
@@ -851,39 +849,55 @@ vm_map_simplify_entry(map, entry)
if (entry->is_sub_map)
return;
if (entry->is_a_map) {
#if 0
vm_map_t my_share_map;
int count;
my_share_map = entry->object.share_map;
count = my_share_map->ref_count;
if (count == 1) {
/*
* Can move the region from entry->start to entry->end
* (+ entry->offset) in my_share_map into place of
* entry. Later.
*/
}
#endif
return;
} else {
/*
* Try to merge with our neighbors.
*
* Conditions for merge are:
*
* 1. entries are adjacent. 2. both entries point to objects
* with null pagers.
*
* If a merge is possible, we replace the two entries with a
* single entry, then merge the two objects into a single
* object.
*
* Now, all that is left to do is write the code!
*/
if (entry->wired_count)
return;
prev = entry->prev;
prevsize = prev->end - prev->start;
next = entry->next;
nextsize = next->end - next->start;
esize = entry->end - entry->start;
if (prev != &map->header &&
prev->end == entry->start &&
prev->is_a_map == FALSE &&
prev->is_sub_map == FALSE &&
prev->object.vm_object == entry->object.vm_object &&
prev->protection == entry->protection &&
prev->max_protection == entry->max_protection &&
prev->inheritance == entry->inheritance &&
prev->needs_copy == entry->needs_copy &&
prev->copy_on_write == entry->copy_on_write &&
prev->offset + prevsize == entry->offset &&
prev->wired_count == 0) {
vm_map_entry_unlink(map, prev);
entry->start = prev->start;
entry->offset = prev->offset;
vm_object_deallocate(prev->object.vm_object);
vm_map_entry_dispose(map, prev);
esize = entry->end - entry->start;
}
if (next != &map->header &&
entry->end == next->start &&
next->is_a_map == FALSE &&
next->is_sub_map == FALSE &&
next->object.vm_object == entry->object.vm_object &&
next->protection == entry->protection &&
next->max_protection == entry->max_protection &&
next->inheritance == entry->inheritance &&
next->needs_copy == entry->needs_copy &&
next->copy_on_write == entry->copy_on_write &&
entry->offset + esize == next->offset &&
next->wired_count == 0) {
vm_map_entry_unlink(map, next);
entry->end = next->end;
vm_object_deallocate(next->object.vm_object);
vm_map_entry_dispose(map, next);
}
}
}
#endif
/*
* vm_map_clip_start: [ internal use only ]
@@ -914,7 +928,7 @@ _vm_map_clip_start(map, entry, start)
* See if we can simplify this entry first
*/
/* vm_map_simplify_entry(map, entry); */
vm_map_simplify_entry(map, entry);
/*
* Split off the front portion -- note that we must insert the new
@@ -1806,21 +1820,17 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
if (!(su = src_map->is_main_map)) {
su = (src_map->ref_count == 1);
}
#ifdef VM_MAP_OLD
if (su) {
pmap_protect(src_map->pmap,
src_entry->start,
src_entry->end,
src_entry->protection & ~VM_PROT_WRITE);
} else {
#endif
vm_object_pmap_copy(src_entry->object.vm_object,
OFF_TO_IDX(src_entry->offset),
OFF_TO_IDX(src_entry->offset + (src_entry->end
- src_entry->start)));
#ifdef VM_MAP_OLD
}
#endif
}
/*
* Make a copy of the object.
@@ -1883,6 +1893,8 @@ vmspace_fork(vm1)
vm_map_entry_t old_entry;
vm_map_entry_t new_entry;
pmap_t new_pmap;
vm_object_t object;
vm_page_t p;
vm_map_lock(old_map);
@@ -1910,7 +1922,8 @@ vmspace_fork(vm1)
new_entry = vm_map_entry_create(new_map);
*new_entry = *old_entry;
new_entry->wired_count = 0;
++new_entry->object.vm_object->ref_count;
object = new_entry->object.vm_object;
++object->ref_count;
/*
* Insert the entry into the new map -- we know we're

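The new vm_map_simplify_entry() above coalesces an entry with its neighbors when the address ranges touch, both entries reference the same object at contiguous offsets, and every attribute matches. A stripped-down model of the merge test and the coalescing step, over a hypothetical reduced entry structure (the kernel's vm_map_entry carries many more fields):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
	size_t	start, end, offset;
	void   *object;
	int	protection, max_protection, inheritance;
	bool	needs_copy, copy_on_write;
	int	wired_count;
};

/* Mirror of the adjacency test: may prev be absorbed into e? */
static bool
can_merge(const struct entry *prev, const struct entry *e)
{
	return prev->end == e->start &&
	    prev->object == e->object &&
	    prev->offset + (prev->end - prev->start) == e->offset &&
	    prev->protection == e->protection &&
	    prev->max_protection == e->max_protection &&
	    prev->inheritance == e->inheritance &&
	    prev->needs_copy == e->needs_copy &&
	    prev->copy_on_write == e->copy_on_write &&
	    prev->wired_count == 0 && e->wired_count == 0;
}

/* Absorb prev into e by widening e's range back to prev's start. */
static void
merge(const struct entry *prev, struct entry *e)
{
	e->start = prev->start;
	e->offset = prev->offset;
}

int
main(void)
{
	int obj;	/* dummy object identity */
	struct entry a = { 0, 4096, 0, &obj, 3, 7, 1, false, false, 0 };
	struct entry b = { 4096, 8192, 4096, &obj, 3, 7, 1, false, false, 0 };

	if (can_merge(&a, &b)) {
		merge(&a, &b);
		printf("merged: [%zu, %zu)\n", b.start, b.end);	/* [0, 8192) */
	}
	return 0;
}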
sys/vm/vm_mmap.c

@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.35 1996/01/19 03:59:59 dyson Exp $
* $Id: vm_mmap.c,v 1.36 1996/02/23 18:49:25 peter Exp $
*/
/*
@@ -773,6 +773,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
object2->backing_object_offset = foff;
TAILQ_INSERT_TAIL(&object->shadow_head,
object2, shadow_list);
++object->shadow_count;
} else {
docow |= MAP_COPY_NEEDED;
}
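This one-line change, like the vm_object.c hunks below, keeps the new shadow_count field in lock step with the shadow_head list: every TAILQ insertion is paired with an increment, every removal with a decrement. The commit does both updates by hand at each site; a hypothetical pair of helpers (not in the commit) makes the invariant explicit:

#include <sys/queue.h>

struct object {
	TAILQ_HEAD(shadowq, object) shadow_head;	/* objects shadowing us */
	TAILQ_ENTRY(object)	shadow_list;
	int			shadow_count;		/* length of shadow_head */
};

static void
shadow_add(struct object *backing, struct object *shadow)
{
	TAILQ_INSERT_TAIL(&backing->shadow_head, shadow, shadow_list);
	++backing->shadow_count;	/* count tracks the list */
}

static void
shadow_remove(struct object *backing, struct object *shadow)
{
	TAILQ_REMOVE(&backing->shadow_head, shadow, shadow_list);
	--backing->shadow_count;
}

int
main(void)
{
	struct object backing, shadow;

	TAILQ_INIT(&backing.shadow_head);
	backing.shadow_count = 0;
	shadow_add(&backing, &shadow);
	shadow_remove(&backing, &shadow);
	return backing.shadow_count;	/* 0: insertions and removals balance */
}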

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.62 1996/01/04 21:13:20 wollman Exp $
* $Id: vm_object.c,v 1.63 1996/01/19 04:00:02 dyson Exp $
*/
/*
@@ -164,6 +164,7 @@ _vm_object_allocate(type, size, object)
object->flags = 0;
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->shadow_count = 0;
object->handle = NULL;
object->paging_offset = (vm_ooffset_t) 0;
object->backing_object = NULL;
@@ -260,6 +261,7 @@ vm_object_deallocate(object)
vm_object_t object;
{
vm_object_t temp;
vm_page_t p;
while (object != NULL) {
@@ -270,7 +272,6 @@ vm_object_deallocate(object)
* Lose the reference
*/
object->ref_count--;
if (object->ref_count != 0) {
if ((object->ref_count == 1) &&
(object->handle == NULL) &&
@@ -348,8 +349,10 @@ vm_object_deallocate(object)
object->flags |= OBJ_DEAD;
temp = object->backing_object;
if (temp)
if (temp) {
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
--temp->shadow_count;
}
vm_object_terminate(object);
/* unlocks and deallocates object */
object = temp;
@@ -678,6 +681,7 @@ vm_object_pmap_remove(object, start, end)
*
* May defer the copy until later if the object is not backed
* up by a non-default pager.
*
*/
void
vm_object_copy(src_object, src_offset,
@@ -711,7 +715,6 @@ vm_object_copy(src_object, src_offset,
* Make another reference to the object
*/
src_object->ref_count++;
*dst_object = src_object;
*dst_offset = src_offset;
@@ -758,8 +761,10 @@ vm_object_shadow(object, offset, length)
* of reference count.
*/
result->backing_object = source;
if (source)
TAILQ_INSERT_TAIL(&result->backing_object->shadow_head, result, shadow_list);
if (source) {
TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
++source->shadow_count;
}
/*
* Store the offset into the source object, and fix up the offset into
@@ -1018,13 +1023,18 @@ vm_object_collapse(object)
TAILQ_REMOVE(&object->backing_object->shadow_head, object,
shadow_list);
if (backing_object->backing_object)
--object->backing_object->shadow_count;
if (backing_object->backing_object) {
TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
backing_object, shadow_list);
--backing_object->backing_object->shadow_count;
}
object->backing_object = backing_object->backing_object;
if (object->backing_object)
if (object->backing_object) {
TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
object, shadow_list);
++object->backing_object->shadow_count;
}
object->backing_object_offset += backing_object->backing_object_offset;
/*
@@ -1096,10 +1106,13 @@ vm_object_collapse(object)
TAILQ_REMOVE(&object->backing_object->shadow_head,
object, shadow_list);
--object->backing_object->shadow_count;
vm_object_reference(object->backing_object = backing_object->backing_object);
if (object->backing_object)
if (object->backing_object) {
TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
object, shadow_list);
++object->backing_object->shadow_count;
}
object->backing_object_offset += backing_object->backing_object_offset;
/*

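vm_object_collapse() now relinks objects between shadow lists at two separate sites, so a cheap way to validate the new counter is to walk the list and compare its length. A hypothetical debug-only check, again over a reduced stand-in structure and assuming a sys/queue.h that provides TAILQ_FOREACH:

#include <assert.h>
#include <sys/queue.h>

struct object {
	TAILQ_HEAD(shadowq, object) shadow_head;
	TAILQ_ENTRY(object)	shadow_list;
	int			shadow_count;
};

/* Invariant: shadow_count equals the number of listed shadows. */
static void
shadow_count_check(struct object *backing)
{
	struct object *o;
	int n = 0;

	TAILQ_FOREACH(o, &backing->shadow_head, shadow_list)
		n++;
	assert(n == backing->shadow_count);
}

int
main(void)
{
	struct object backing;

	TAILQ_INIT(&backing.shadow_head);
	backing.shadow_count = 0;
	shadow_count_check(&backing);	/* passes: empty list, count 0 */
	return 0;
}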
sys/vm/vm_object.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.25 1995/12/11 04:58:23 dyson Exp $
* $Id: vm_object.h,v 1.26 1995/12/14 09:55:05 phk Exp $
*/
/*
@@ -93,6 +93,7 @@ struct vm_object {
objtype_t type; /* type of pager */
vm_size_t size; /* Object size */
int ref_count; /* How many refs?? */
int shadow_count; /* count of objects that shadow this object */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int resident_page_count; /* number of resident pages */

sys/vm/vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.46 1996/01/19 04:00:10 dyson Exp $
* $Id: vm_page.c,v 1.47 1996/01/27 00:13:33 bde Exp $
*/
/*
@@ -826,10 +826,6 @@ vm_page_free(m)
vm_page_remove(m);
vm_page_unqueue(m);
/*
if ((flags & PG_WANTED) != 0)
wakeup(m);
*/
if ((flags & PG_FICTITIOUS) == 0) {
if (m->wire_count) {
if (m->wire_count > 1) {