
Fix non-fatal bug in vm_map_insert() which improperly cleared
    OBJ_ONEMAPPING in the case where an object is extended but an
    additional vm_map_entry must be allocated.

    In vm_object_madvise(), remove the call to vm_page_cache() in the
    MADV_FREE case in order to avoid a page fault on page reuse.  However,
    we still mark the page as clean and destroy any swap backing store.

Submitted by:	Alan Cox <alc@cs.rice.edu>
Matthew Dillon 1999-02-12 09:51:43 +00:00
parent 28791bce44
commit 2aaeadf8d9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=43923
2 changed files with 49 additions and 60 deletions

sys/vm/vm_map.c View File

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.147 1999/02/03 01:57:16 dillon Exp $
+ * $Id: vm_map.c,v 1.148 1999/02/07 21:48:22 dillon Exp $
*/
/*
@@ -429,6 +429,9 @@ vm_map_lookup_entry(map, address, entry)
* size should match that of the address range.
*
* Requires that the map be locked, and leaves it so.
*
* If object is non-NULL, ref count must be bumped by caller
* prior to making call to account for the new entry.
*/
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
@@ -438,9 +441,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_map_entry_t new_entry;
vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
#if 0
vm_object_t prev_object;
#endif
u_char protoeflags;
if ((object != NULL) && (cow & MAP_NOFAULT)) {
@@ -483,13 +483,18 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (cow & MAP_NOFAULT)
protoeflags |= MAP_ENTRY_NOFAULT;
/*
* See if we can avoid creating a new entry by extending one of our
* neighbors. Or at least extend the object.
*/
if (
(object == NULL) &&
if (object) {
/*
* When object is non-NULL, it could be shared with another
* process. We have to set or clear OBJ_ONEMAPPING
* appropriately.
*/
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
vm_object_clear_flag(object, OBJ_ONEMAPPING);
} else {
vm_object_set_flag(object, OBJ_ONEMAPPING);
}
} else if (
(prev_entry != &map->header) &&
((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) &&
((prev_entry->object.vm_object == NULL) ||
@@ -506,8 +511,9 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
(vm_size_t) (end - prev_entry->end)))) {
/*
* Coalesced the two objects. Can we extend the
* previous map entry to include the new range?
* We were able to extend the object. Determine if we
* can extend the previous map entry to include the
* new range as well.
*/
if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
(prev_entry->protection == prot) &&
@@ -515,27 +521,28 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
map->size += (end - prev_entry->end);
prev_entry->end = end;
#if 0
/*
* (no longer applies)
*/
if ((cow & MAP_NOFAULT) == 0) {
prev_object = prev_entry->object.vm_object;
default_pager_convert_to_swapq(prev_object);
}
#endif
return (KERN_SUCCESS);
}
else {
object = prev_entry->object.vm_object;
offset = prev_entry->offset + (prev_entry->end -
prev_entry->start);
vm_object_reference(object);
}
/*
* If we can extend the object but cannot extend the
* map entry, we have to create a new map entry. We
* must bump the ref count on the extended object to
* account for it.
*/
object = prev_entry->object.vm_object;
offset = prev_entry->offset +
(prev_entry->end - prev_entry->start);
vm_object_reference(object);
}
}
/*
* NOTE: if conditionals fail, object can be NULL here. This occurs
* in things like the buffer map where we manage kva but do not manage
* backing objects.
*/
/*
* Create a new entry
*/
@@ -549,14 +556,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
new_entry->offset = offset;
new_entry->avail_ssize = 0;
if (object) {
if ((object->ref_count > 1) || (object->shadow_count != 0)) {
vm_object_clear_flag(object, OBJ_ONEMAPPING);
} else {
vm_object_set_flag(object, OBJ_ONEMAPPING);
}
}
if (map->is_main_map) {
new_entry->inheritance = VM_INHERIT_DEFAULT;
new_entry->protection = prot;
@@ -577,12 +576,6 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
(prev_entry->end >= new_entry->start))
map->first_free = new_entry;
#if 0
/*
* (no longer applies)
*/
default_pager_convert_to_swapq(object);
#endif
return (KERN_SUCCESS);
}
@@ -853,6 +846,8 @@ vm_map_findspace(map, start, length, addr)
* first-fit from the specified address; the region found is
* returned in the same parameter.
*
* If object is non-NULL, ref count must be bumped by caller
* prior to making call to account for the new entry.
*/
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
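
For readers tracking the flag handling, the following is a condensed, illustrative sketch of the order of operations vm_map_insert() follows after this change; it is not the committed source. The placeholders can_extend_prev_object and can_extend_prev_entry stand in for the neighbor-coalescing checks (including the vm_object_coalesce() call) and the inheritance/protection comparison, and all locking and entry construction are omitted.

    /* Hypothetical condensation of the new vm_map_insert() flow. */
    if (object != NULL) {
        /*
         * A caller-supplied object may already be mapped by another
         * process, so decide OBJ_ONEMAPPING up front from its
         * reference and shadow counts.
         */
        if ((object->ref_count > 1) || (object->shadow_count != 0))
            vm_object_clear_flag(object, OBJ_ONEMAPPING);
        else
            vm_object_set_flag(object, OBJ_ONEMAPPING);
    } else if (can_extend_prev_object) {
        if (can_extend_prev_entry) {
            /* Grow the previous entry in place; no new entry needed. */
            map->size += (end - prev_entry->end);
            prev_entry->end = end;
            return (KERN_SUCCESS);
        }
        /*
         * Object extended but a new map entry is still required:
         * take an extra reference for the new entry and leave
         * OBJ_ONEMAPPING alone; re-evaluating it at entry creation
         * time was the bug being fixed.
         */
        object = prev_entry->object.vm_object;
        offset = prev_entry->offset +
            (prev_entry->end - prev_entry->start);
        vm_object_reference(object);
    }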

sys/vm/vm_object.c View File

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.147 1999/02/08 05:15:54 dillon Exp $
+ * $Id: vm_object.c,v 1.148 1999/02/08 19:00:15 dillon Exp $
*/
/*
@@ -762,8 +762,8 @@ vm_object_madvise(object, pindex, count, advise)
end = pindex + count;
/*
* MADV_FREE special case - free any swap backing store (as well
* as resident pages later on).
* MADV_FREE special case - free any swap backing store now,
* whether or not resident pages can be found later.
*/
if (advise == MADV_FREE) {
@@ -835,24 +835,18 @@ vm_object_madvise(object, pindex, count, advise)
vm_page_deactivate(m);
} else if (advise == MADV_FREE) {
/*
* If MADV_FREE_FORCE_FREE is defined, we attempt to
* immediately free the page. Otherwise we just
* destroy any swap backing store, mark it clean,
* and stuff it into the cache.
* Mark the page clean. This will allow the page
* to be freed up by the system. However, such pages
* are often reused quickly by malloc()/free()
* so we do not do anything that would cause
* a page fault if we can help it.
*
* Specifically, we do not try to actually free
* the page now nor do we try to put it in the
* cache (which would cause a page fault on reuse).
*/
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = 0;
#ifdef MADV_FREE_FORCE_FREE
if (tobject->resident_page_count > 1) {
vm_page_busy(m);
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
} else
#endif
{
vm_page_cache(m);
}
}
}
}
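
For context on why the vm_page_cache() call was removed, here is a hypothetical userland sequence (not part of this commit) that exercises the MADV_FREE path. With this change the page stays resident and mapped, merely marked clean with its swap backing released, so the write after madvise() does not take the page fault that re-using a cached page would have forced.

#include <sys/types.h>
#include <sys/mman.h>

int
main(void)
{
    size_t len = 16 * 4096;
    char *p;

    p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_PRIVATE, -1, 0);
    if (p == MAP_FAILED)
        return (1);
    p[0] = 1;                       /* dirty a resident page */
    madvise(p, len, MADV_FREE);     /* swap freed, page marked clean */
    p[0] = 2;                       /* reuse without a forced fault */
    munmap(p, len);
    return (0);
}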