
- Change uma_zone_set_obj() to call kmem_alloc_nofault() instead of
  kmem_alloc_pageable().  The difference between these is that an errant
  memory access to the zone will be detected sooner with
  kmem_alloc_nofault(), as the sketch below illustrates.
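
For illustration, a minimal sketch of the changed call, with assumed local
names (pages and kva are taken from the diff further down; the rest is
context, not the committed code).  kmem_alloc_pageable() returns address
space whose pages are zero-filled on first touch by the fault handler, so an
errant access into an unused part of the zone is silently satisfied;
kmem_alloc_nofault() returns address space into which pages must be entered
explicitly, so the same errant access faults, and is caught, at once:

	vm_offset_t kva;

	/*
	 * Reserve kernel virtual address space only; no pages are
	 * entered yet.  Any access before a page is explicitly
	 * installed now triggers a fault immediately instead of
	 * being quietly zero-filled.
	 */
	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
	if (kva == 0)
		return (0);	/* kernel address space exhausted */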

The remaining changes eliminate the following lock-order reversal
reported by witness:

 1st 0xc1a3c084 vm object (vm object) @ vm/swap_pager.c:1311
 2nd 0xc07acb00 swap_pager swhash (swap_pager swhash) @ vm/swap_pager.c:1797
 3rd 0xc1804bdc vm object (vm object) @ vm/uma_core.c:931

There is no potential deadlock in this case.  However, witness is unable
to recognize this because vm objects used by UMA have the same type as
ordinary vm objects.  To remedy this, we make the following changes:

 - Add a mutex type argument to VM_OBJECT_LOCK_INIT() (see the sketch
   after this list for how witness uses the type).
 - Use the mutex type argument to assign distinct types to special
   vm objects such as the kernel object, kmem object, and UMA objects.
 - Define a static swap zone object for use by UMA.  (Only static
   objects are assigned a special mutex type.)
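
A minimal sketch of the mechanism, assuming the stock
mtx_init(lock, name, type, opts) signature: witness classifies a lock by its
type string (falling back to the name when the type is NULL), so giving
special objects their own types places them in distinct lock orders while
every vm object keeps the display name "vm object":

	/*
	 * Illustration only; variable names are assumed.  Witness now
	 * treats "standard object" and "uma object" as separate lock
	 * classes, so taking a UMA backing object's lock while an
	 * ordinary object lock and the swhash lock are held no longer
	 * looks like a vm object -> swhash -> vm object cycle.
	 */
	mtx_init(&ordinary_obj.mtx, "vm object", "standard object",
	    MTX_DEF | MTX_DUPOK);
	mtx_init(&swap_zone_obj.mtx, "vm object", "uma object",
	    MTX_DEF | MTX_DUPOK);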
Author: Alan Cox
Date:   2004-07-22 19:44:49 +00:00
parent 1c1ce9253f
commit 5285558ac2
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=132550

4 changed files with 10 additions and 13 deletions

sys/vm/swap_pager.c

@@ -219,6 +219,7 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
 static uma_zone_t	swap_zone;
+static struct vm_object	swap_zone_obj;
 
 /*
  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
@@ -419,7 +420,7 @@ swap_pager_swap_init(void)
 	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
 	do {
-		if (uma_zone_set_obj(swap_zone, NULL, n))
+		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
 			break;
 		/*
 		 * if the allocation failed, try a zone two thirds the
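
For context, a hedged sketch of how this retry loop plausibly continues; the
comment above is truncated by the diff, and the decrement shown here is an
assumption reconstructed from it, not part of this commit:

	do {
		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous zone.
		 */
		n -= ((n + 2) / 3);	/* assumed: shrink n to ~2/3 */
	} while (n > 0);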

sys/vm/uma_core.c

@@ -2304,7 +2304,7 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 	if (pages * keg->uk_ipers < count)
 		pages++;
-	kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
+	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
 	if (kva == 0)
 		return (0);
@@ -2312,7 +2312,7 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
 		obj = vm_object_allocate(OBJT_DEFAULT,
 		    pages);
 	} else {
-		VM_OBJECT_LOCK_INIT(obj);
+		VM_OBJECT_LOCK_INIT(obj, "uma object");
 		_vm_object_allocate(OBJT_DEFAULT,
 		    pages, obj);
 	}

sys/vm/vm_object.c

@@ -182,7 +182,7 @@ vm_object_zinit(void *mem, int size)
 	object = (vm_object_t)mem;
 	bzero(&object->mtx, sizeof(object->mtx));
-	VM_OBJECT_LOCK_INIT(object);
+	VM_OBJECT_LOCK_INIT(object, "standard object");
 
 	/* These are true for any object that has been freed */
 	object->paging_in_progress = 0;
@@ -234,16 +234,11 @@ vm_object_init(void)
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
-	VM_OBJECT_LOCK_INIT(&kernel_object_store);
+	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
 	    kernel_object);
 
-	/*
-	 * The kmem object's mutex is given a unique name, instead of
-	 * "vm object", to avoid false reports of lock-order reversal
-	 * with a system map mutex.
-	 */
-	mtx_init(VM_OBJECT_MTX(kmem_object), "kmem object", NULL, MTX_DEF);
+	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
 	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
 	    kmem_object);

sys/vm/vm_object.h

@@ -172,8 +172,9 @@ extern struct vm_object kmem_object_store;
 #define	VM_OBJECT_LOCK(object)	mtx_lock(&(object)->mtx)
 #define	VM_OBJECT_LOCK_ASSERT(object, type) \
 				mtx_assert(&(object)->mtx, (type))
-#define	VM_OBJECT_LOCK_INIT(object)	mtx_init(&(object)->mtx, "vm object", \
-					    NULL, MTX_DEF | MTX_DUPOK)
+#define	VM_OBJECT_LOCK_INIT(object, type) \
+				mtx_init(&(object)->mtx, "vm object", \
+				    (type), MTX_DEF | MTX_DUPOK)
 #define	VM_OBJECT_LOCKED(object)	mtx_owned(&(object)->mtx)
 #define	VM_OBJECT_MTX(object)	(&(object)->mtx)
 #define	VM_OBJECT_TRYLOCK(object)	mtx_trylock(&(object)->mtx)