Simplify anonymous memory handling with an OBJ_ANON flag.  This eliminates
redundant, complicated checks and additional locking required only for
anonymous memory.  Introduce vm_object_allocate_anon() to create these
objects.  DEFAULT and SWAP objects now have the correct settings for
non-anonymous consumers, so individual consumers need not modify the
default flags to create super-pages or to avoid ONEMAPPING/NOSPLIT.

Reviewed by:	alc, dougm, kib, markj
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D22119
This commit is contained in:
  parent 44cc3f9c31
  commit 639676877b

Notes:
  svn2git 2020-12-20 02:59:44 +00:00
  svn path=/head/; revision=354869
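For orientation, here is a minimal consumer-side sketch of the two patterns this change simplifies. It is assembled from fragments of the diff below and is not itself part of the commit; the identifiers appear in the hunks that follow.

	/* Anonymous memory, e.g. in vm_map_entry_back(): formerly a plain
	 * DEFAULT object whose flags had to be tuned by each consumer... */
	object = vm_object_allocate(OBJT_DEFAULT, atop(entry->end - entry->start));
	/* ...now allocated already marked OBJ_ANON | OBJ_ONEMAPPING: */
	object = vm_object_allocate_anon(atop(entry->end - entry->start));

	/* Non-anonymous consumers, e.g. SysV/POSIX shm, previously had to
	 * undo the anonymous defaults by hand under the object lock; with
	 * DEFAULT/SWAP objects now created with correct non-anonymous
	 * settings, this whole block is simply deleted: */
	shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shm_object);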
@@ -273,8 +273,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
 		    NULL /* XXXKIB - tmpfs needs swap reservation */);
 		VM_OBJECT_WLOCK(obj);
 		/* OBJ_TMPFS is set together with the setting of vp->v_object */
-		vm_object_set_flag(obj, OBJ_NOSPLIT | OBJ_TMPFS_NODE);
-		vm_object_clear_flag(obj, OBJ_ONEMAPPING);
+		vm_object_set_flag(obj, OBJ_TMPFS_NODE);
 		VM_OBJECT_WUNLOCK(obj);
 		break;
@@ -751,11 +751,6 @@ shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode)
 #endif
 		return (ENOMEM);
 	}
-	shm_object->pg_color = 0;
-	VM_OBJECT_WLOCK(shm_object);
-	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
-	vm_object_set_flag(shm_object, OBJ_COLORED | OBJ_NOSPLIT);
-	VM_OBJECT_WUNLOCK(shm_object);

 	shmseg->object = shm_object;
 	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
@@ -597,11 +597,6 @@ shm_alloc(struct ucred *ucred, mode_t mode)
 	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
 	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
 	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
-	shmfd->shm_object->pg_color = 0;
-	VM_OBJECT_WLOCK(shmfd->shm_object);
-	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
-	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
-	VM_OBJECT_WUNLOCK(shmfd->shm_object);
 	vfs_timestamp(&shmfd->shm_birthtime);
 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
 	    shmfd->shm_birthtime;
@@ -3038,7 +3038,7 @@ swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
 {

 	VM_OBJECT_WLOCK(object);
-	KASSERT((object->flags & OBJ_NOSPLIT) != 0,
+	KASSERT((object->flags & OBJ_ANON) == 0,
 	    ("Splittable object with writecount"));
 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
 	VM_OBJECT_WUNLOCK(object);
@@ -3050,7 +3050,7 @@ swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
 {

 	VM_OBJECT_WLOCK(object);
-	KASSERT((object->flags & OBJ_NOSPLIT) != 0,
+	KASSERT((object->flags & OBJ_ANON) == 0,
 	    ("Splittable object with writecount"));
 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
 	VM_OBJECT_WUNLOCK(object);
@@ -1239,8 +1239,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 		    /*
 		     * No other ways to look the object up
 		     */
-		    ((fs.object->type == OBJT_DEFAULT) ||
-		    (fs.object->type == OBJT_SWAP)) &&
+		    ((fs.object->flags & OBJ_ANON) != 0) &&
 		    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
 		    /*
 		     * We don't chase down the shadow chain
@@ -1739,7 +1738,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 	 * Create the top-level object for the destination entry. (Doesn't
 	 * actually shadow anything - we copy the pages directly.)
 	 */
-	dst_object = vm_object_allocate(OBJT_DEFAULT,
+	dst_object = vm_object_allocate_anon(
 	    atop(dst_entry->end - dst_entry->start));
 #if VM_NRESERVLEVEL > 0
 	dst_object->flags |= OBJ_COLORED;
@@ -1504,10 +1504,12 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 		 * reference counting is insufficient to recognize
 		 * aliases with precision.)
 		 */
-		VM_OBJECT_WLOCK(object);
-		if (object->ref_count > 1 || object->shadow_count != 0)
-			vm_object_clear_flag(object, OBJ_ONEMAPPING);
-		VM_OBJECT_WUNLOCK(object);
+		if ((object->flags & OBJ_ANON) != 0) {
+			VM_OBJECT_WLOCK(object);
+			if (object->ref_count > 1 || object->shadow_count != 0)
+				vm_object_clear_flag(object, OBJ_ONEMAPPING);
+			VM_OBJECT_WUNLOCK(object);
+		}
 	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
 	    protoeflags &&
 	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
@@ -2101,8 +2103,7 @@ vm_map_entry_back(vm_map_entry_t entry)
 	    ("map entry %p has backing object", entry));
 	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
 	    ("map entry %p is a submap", entry));
-	object = vm_object_allocate(OBJT_DEFAULT,
-	    atop(entry->end - entry->start));
+	object = vm_object_allocate_anon(atop(entry->end - entry->start));
 	entry->object.vm_object = object;
 	entry->offset = 0;
 	if (entry->cred != NULL) {
@@ -3488,8 +3489,10 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 		crfree(entry->cred);
 	}

-	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
-	    (object != NULL)) {
+	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
+		entry->object.vm_object = NULL;
+	} else if ((object->flags & OBJ_ANON) != 0 ||
+	    object == kernel_object) {
 		KASSERT(entry->cred == NULL || object->cred == NULL ||
 		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
 		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
@@ -3497,8 +3500,8 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 		offidxstart = OFF_TO_IDX(entry->offset);
 		offidxend = offidxstart + count;
 		VM_OBJECT_WLOCK(object);
-		if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
-		    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
+		if (object->ref_count != 1 &&
+		    ((object->flags & OBJ_ONEMAPPING) != 0 ||
 		    object == kernel_object)) {
 			vm_object_collapse(object);
@@ -3528,8 +3531,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 			}
 		}
 		VM_OBJECT_WUNLOCK(object);
-	} else
-		entry->object.vm_object = NULL;
+	}
 	if (map->system_map)
 		vm_map_entry_deallocate(entry, TRUE);
 	else {
@@ -3748,11 +3750,9 @@ vm_map_copy_entry(
 			VM_OBJECT_WLOCK(src_object);
 			charged = ENTRY_CHARGED(src_entry);
 			if (src_object->handle == NULL &&
-			    (src_object->type == OBJT_DEFAULT ||
-			    src_object->type == OBJT_SWAP)) {
+			    (src_object->flags & OBJ_ANON) != 0) {
 				vm_object_collapse(src_object);
-				if ((src_object->flags & (OBJ_NOSPLIT |
-				    OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
+				if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
 					vm_object_split(src_entry);
 					src_object =
 					    src_entry->object.vm_object;
@@ -4686,8 +4686,7 @@ vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
 	    !map->system_map) {
 		if (vm_map_lock_upgrade(map))
 			goto RetryLookup;
-		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
-		    atop(size));
+		entry->object.vm_object = vm_object_allocate_anon(atop(size));
 		entry->offset = 0;
 		if (entry->cred != NULL) {
 			VM_OBJECT_WLOCK(entry->object.vm_object);
@@ -258,7 +258,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 			continue;
 		}
 		if (object->ref_count == 1 &&
-		    (object->flags & OBJ_NOSPLIT) != 0) {
+		    (object->flags & OBJ_ANON) == 0) {
 			/*
 			 * Also skip otherwise unreferenced swap
 			 * objects backing tmpfs vnodes, and POSIX or
@@ -239,7 +239,8 @@ vm_object_zinit(void *mem, int size, int flags)
 }

 static void
-_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
+_vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
+    vm_object_t object)
 {

 	TAILQ_INIT(&object->memq);
@@ -256,29 +257,8 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 	 */
 	atomic_thread_fence_rel();

-	switch (type) {
-	case OBJT_DEAD:
-		panic("_vm_object_allocate: can't create OBJT_DEAD");
-	case OBJT_DEFAULT:
-	case OBJT_SWAP:
-		object->flags = OBJ_ONEMAPPING;
-		break;
-	case OBJT_DEVICE:
-	case OBJT_SG:
-		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
-		break;
-	case OBJT_MGTDEVICE:
-		object->flags = OBJ_FICTITIOUS;
-		break;
-	case OBJT_PHYS:
-		object->flags = OBJ_UNMANAGED;
-		break;
-	case OBJT_VNODE:
-		object->flags = 0;
-		break;
-	default:
-		panic("_vm_object_allocate: type %d is undefined", type);
-	}
-	object->pg_color = 0;
+	object->flags = flags;
 	object->size = size;
 	object->domain.dr_policy = NULL;
 	object->generation = 1;
@@ -309,7 +289,7 @@ vm_object_init(void)

 	rw_init(&kernel_object->lock, "kernel vm object");
 	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
-	    VM_MIN_KERNEL_ADDRESS), kernel_object);
+	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object);
 #if VM_NRESERVLEVEL > 0
 	kernel_object->flags |= OBJ_COLORED;
 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
@@ -425,11 +405,55 @@ vm_object_pip_wait_unlocked(vm_object_t object, char *waitid)
  */
 vm_object_t
 vm_object_allocate(objtype_t type, vm_pindex_t size)
 {
 	vm_object_t object;
+	u_short flags;

+	switch (type) {
+	case OBJT_DEAD:
+		panic("vm_object_allocate: can't create OBJT_DEAD");
+	case OBJT_DEFAULT:
+	case OBJT_SWAP:
+		flags = OBJ_COLORED;
+		break;
+	case OBJT_DEVICE:
+	case OBJT_SG:
+		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
+		break;
+	case OBJT_MGTDEVICE:
+		flags = OBJ_FICTITIOUS;
+		break;
+	case OBJT_PHYS:
+		flags = OBJ_UNMANAGED;
+		break;
+	case OBJT_VNODE:
+		flags = 0;
+		break;
+	default:
+		panic("vm_object_allocate: type %d is undefined", type);
+	}
 	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
-	_vm_object_allocate(type, size, object);
+	_vm_object_allocate(type, size, flags, object);

 	return (object);
 }

+/*
+ * vm_object_allocate_anon:
+ *
+ *	Returns a new default object of the given size and marked as
+ *	anonymous memory for special split/collapse handling.  Color
+ *	to be initialized by the caller.
+ */
+vm_object_t
+vm_object_allocate_anon(vm_pindex_t size)
+{
+	vm_object_t object;
+
+	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
+	_vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING,
+	    object);
+
+	return (object);
+}
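As the hunk above shows, vm_object_allocate_anon() is a thin wrapper around _vm_object_allocate() that presets OBJT_DEFAULT and OBJ_ANON | OBJ_ONEMAPPING. A minimal usage sketch follows; the function name is hypothetical, not from the tree, and only the allocator call itself comes from this commit.

	/* Hypothetical example: back npages of a mapping with fresh
	 * anonymous memory.  The object remains OBJT_DEFAULT unless the
	 * swap pager later converts it. */
	static vm_object_t
	example_alloc_anon_backing(vm_pindex_t npages)
	{
		vm_object_t obj;

		obj = vm_object_allocate_anon(npages);
		/* Per the function's comment, pg_color is left to the caller. */
		obj->pg_color = 0;
		return (obj);
	}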
@@ -522,7 +546,10 @@ vm_object_deallocate(vm_object_t object)
 		 * being 0 or 1.  These cases require a write lock on the
 		 * object.
 		 */
-		released = refcount_release_if_gt(&object->ref_count, 2);
+		if ((object->flags & OBJ_ANON) == 0)
+			released = refcount_release_if_gt(&object->ref_count, 1);
+		else
+			released = refcount_release_if_gt(&object->ref_count, 2);
 		VM_OBJECT_RUNLOCK(object);
 		if (released)
 			return;
@@ -538,14 +565,11 @@ vm_object_deallocate(vm_object_t object)
 	} else if (object->ref_count == 1) {
 		if (object->shadow_count == 0 &&
 		    object->handle == NULL &&
-		    (object->type == OBJT_DEFAULT ||
-		    (object->type == OBJT_SWAP &&
-		    (object->flags & OBJ_TMPFS_NODE) == 0))) {
+		    (object->flags & OBJ_ANON) != 0) {
 			vm_object_set_flag(object, OBJ_ONEMAPPING);
 		} else if ((object->shadow_count == 1) &&
 		    (object->handle == NULL) &&
-		    (object->type == OBJT_DEFAULT ||
-		    object->type == OBJT_SWAP)) {
+		    (object->flags & OBJ_ANON) != 0) {
 			vm_object_t robject;

 			robject = LIST_FIRST(&object->shadow_head);
@@ -576,10 +600,9 @@ vm_object_deallocate(vm_object_t object)
 			 * be deallocated by the thread that is
 			 * deallocating its shadow.
 			 */
-			if ((robject->flags & OBJ_DEAD) == 0 &&
-			    (robject->handle == NULL) &&
-			    (robject->type == OBJT_DEFAULT ||
-			    robject->type == OBJT_SWAP)) {
+			if ((robject->flags &
+			    (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON &&
+			    robject->handle == NULL) {

 				refcount_acquire(&robject->ref_count);
 retry:
@@ -1049,8 +1072,8 @@ vm_object_advice_applies(vm_object_t object, int advice)
 		return (false);
 	if (advice != MADV_FREE)
 		return (true);
-	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
-	    (object->flags & OBJ_ONEMAPPING) != 0);
+	return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
+	    (OBJ_ONEMAPPING | OBJ_ANON));
 }

 static void
@@ -1211,23 +1234,20 @@ vm_object_shadow(

 	/*
 	 * Don't create the new object if the old object isn't shared.
+	 *
+	 * If we hold the only reference we can guarantee that it won't
+	 * increase while we have the map locked.  Otherwise the race is
+	 * harmless and we will end up with an extra shadow object that
+	 * will be collapsed later.
 	 */
-	if (source != NULL) {
-		VM_OBJECT_RLOCK(source);
-		if (source->ref_count == 1 &&
-		    source->handle == NULL &&
-		    (source->type == OBJT_DEFAULT ||
-		    source->type == OBJT_SWAP)) {
-			VM_OBJECT_RUNLOCK(source);
-			return;
-		}
-		VM_OBJECT_RUNLOCK(source);
-	}
+	if (source != NULL && source->ref_count == 1 &&
+	    source->handle == NULL && (source->flags & OBJ_ANON) != 0)
+		return;

 	/*
 	 * Allocate a new object with the given length.
 	 */
-	result = vm_object_allocate(OBJT_DEFAULT, atop(length));
+	result = vm_object_allocate_anon(atop(length));

 	/*
 	 * The new object shadows the source object, adding a reference to it.
@@ -1282,7 +1302,7 @@ vm_object_split(vm_map_entry_t entry)
 	vm_size_t size;

 	orig_object = entry->object.vm_object;
-	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
+	if ((orig_object->flags & OBJ_ANON) == 0)
 		return;
 	if (orig_object->ref_count <= 1)
 		return;
@@ -1295,7 +1315,7 @@ vm_object_split(vm_map_entry_t entry)
 	 * If swap_pager_copy() is later called, it will convert new_object
 	 * into a swap object.
 	 */
-	new_object = vm_object_allocate(OBJT_DEFAULT, size);
+	new_object = vm_object_allocate_anon(size);

 	/*
 	 * At this point, the new object is still private, so the order in
@@ -1443,8 +1463,7 @@ vm_object_scan_all_shadowed(vm_object_t object)

 	backing_object = object->backing_object;

-	if (backing_object->type != OBJT_DEFAULT &&
-	    backing_object->type != OBJT_SWAP)
+	if ((backing_object->flags & OBJ_ANON) == 0)
 		return (false);

 	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@@ -1668,15 +1687,13 @@ vm_object_collapse(vm_object_t object)
 		 * we check the backing object first, because it is most likely
 		 * not collapsable.
 		 */
+		if ((backing_object->flags & OBJ_ANON) == 0)
+			break;
 		VM_OBJECT_WLOCK(backing_object);
 		if (backing_object->handle != NULL ||
-		    (backing_object->type != OBJT_DEFAULT &&
-		    backing_object->type != OBJT_SWAP) ||
-		    (backing_object->flags & (OBJ_DEAD | OBJ_NOSPLIT)) != 0 ||
+		    (backing_object->flags & OBJ_DEAD) != 0 ||
 		    object->handle != NULL ||
-		    (object->type != OBJT_DEFAULT &&
-		    object->type != OBJT_SWAP) ||
-		    (object->flags & OBJ_DEAD)) {
+		    (object->flags & OBJ_DEAD) != 0) {
 			VM_OBJECT_WUNLOCK(backing_object);
 			break;
 		}
@@ -2027,14 +2044,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,

 	if (prev_object == NULL)
 		return (TRUE);
-	VM_OBJECT_WLOCK(prev_object);
-	if ((prev_object->type != OBJT_DEFAULT &&
-	    prev_object->type != OBJT_SWAP) ||
-	    (prev_object->flags & OBJ_NOSPLIT) != 0) {
-		VM_OBJECT_WUNLOCK(prev_object);
+	if ((prev_object->flags & OBJ_ANON) == 0)
 		return (FALSE);
-	}

+	VM_OBJECT_WLOCK(prev_object);
 	/*
 	 * Try to collapse the object first
 	 */
@@ -185,7 +185,7 @@ struct vm_object {
 #define	OBJ_UNMANAGED	0x0002		/* (c) contains unmanaged pages */
 #define	OBJ_POPULATE	0x0004		/* pager implements populate() */
 #define	OBJ_DEAD	0x0008		/* dead objects (during rundown) */
-#define	OBJ_NOSPLIT	0x0010		/* dont split this object */
+#define	OBJ_ANON	0x0010		/* (c) contains anonymous memory */
 #define	OBJ_UMTXDEAD	0x0020		/* umtx pshared was terminated */
 #define	OBJ_SIZEVNLOCK	0x0040		/* lock vnode to check obj size */
 #define	OBJ_PG_DTOR	0x0080		/* dont reset object, leave that for dtor */
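Note from the hunk above that OBJ_ANON reuses OBJ_NOSPLIT's bit value (0x0010) with an inverted sense: anonymous objects are exactly the splittable ones. That is why several checks in this diff flip polarity rather than merely renaming the flag. A sketch of the rewrite pattern, drawn from the swap_pager, vm_map, and vm_object hunks (illustration only, not new code in the commit):

	/* Old idiom: "anonymous" meant an unhandled DEFAULT/SWAP object. */
	if (object->handle == NULL &&
	    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
		/* ... anonymous-memory handling ... */
	}

	/* New idiom: the type pair collapses into a single flag test. */
	if ((object->flags & OBJ_ANON) != 0) {
		/* ... anonymous-memory handling ... */
	}

	/* Old "OBJ_NOSPLIT is set" assertions become "not anonymous". */
	KASSERT((object->flags & OBJ_ANON) == 0,
	    ("Splittable object with writecount"));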
@@ -340,6 +340,7 @@ void umtx_shm_object_terminated(vm_object_t object);
 extern int umtx_shm_vnobj_persistent;

 vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
+vm_object_t vm_object_allocate_anon(vm_pindex_t);
 boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
    boolean_t);
 void vm_object_collapse (vm_object_t);
@@ -719,20 +719,15 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,

 	/*
 	 * Would the last new reservation extend past the end of the object?
+	 *
+	 * If the object is unlikely to grow don't allocate a reservation for
+	 * the tail.
 	 */
-	if (first + maxpages > object->size) {
-		/*
-		 * Don't allocate the last new reservation if the object is a
-		 * vnode or backed by another object that is a vnode.
-		 */
-		if (object->type == OBJT_VNODE ||
-		    (object->backing_object != NULL &&
-		    object->backing_object->type == OBJT_VNODE)) {
-			if (maxpages == VM_LEVEL_0_NPAGES)
-				return (NULL);
-			allocpages = minpages;
-		}
-		/* Speculate that the object may grow. */
+	if ((object->flags & OBJ_ANON) == 0 &&
+	    first + maxpages > object->size) {
+		if (maxpages == VM_LEVEL_0_NPAGES)
+			return (NULL);
+		allocpages = minpages;
 	}

 	/*
@@ -878,19 +873,14 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
 	vm_reserv_object_unlock(object);

 	/*
-	 * Would a new reservation extend past the end of the object?
+	 * Would the last new reservation extend past the end of the object?
+	 *
+	 * If the object is unlikely to grow don't allocate a reservation for
+	 * the tail.
 	 */
-	if (first + VM_LEVEL_0_NPAGES > object->size) {
-		/*
-		 * Don't allocate a new reservation if the object is a vnode or
-		 * backed by another object that is a vnode.
-		 */
-		if (object->type == OBJT_VNODE ||
-		    (object->backing_object != NULL &&
-		    object->backing_object->type == OBJT_VNODE))
-			return (NULL);
-		/* Speculate that the object may grow. */
-	}
+	if ((object->flags & OBJ_ANON) == 0 &&
+	    first + VM_LEVEL_0_NPAGES > object->size)
+		return (NULL);

 	/*
 	 * Allocate and populate the new reservation.