From 582ec34cd83de37af4dab174f7c1f61ef9e043fe Mon Sep 17 00:00:00 2001 From: Alfred Perlstein Date: Tue, 5 Feb 2002 21:23:05 +0000 Subject: [PATCH] Fix a race with free'ing vmspaces at process exit when vmspaces are shared. Also introduce vm_endcopy instead of using pointer tricks when initializing new vmspaces. The race occurred because of how the reference was utilized: test vmspace reference, possibly block, decrement reference When sharing a vmspace between multiple processes it was possible for two processes exiting at the same time to test the reference count, possibly block and neither one free because they wouldn't see the other's update. Submitted by: green --- sys/kern/kern_exit.c | 7 ++++--- sys/vm/vm_extern.h | 1 + sys/vm/vm_glue.c | 2 +- sys/vm/vm_map.c | 45 ++++++++++++++++++++++++++++---------------- sys/vm/vm_map.h | 2 ++ 5 files changed, 37 insertions(+), 20 deletions(-) diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 51b77c4c0e3d..e8c55583c1c3 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -217,13 +217,14 @@ exit1(td, rv) * Can't free the entire vmspace as the kernel stack * may be mapped within that space also. */ - if (vm->vm_refcnt == 1) { + if (--vm->vm_refcnt == 0) { if (vm->vm_shm) shmexit(p); pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); (void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); + vm->vm_freer = p; } PROC_LOCK(p); @@ -400,8 +401,8 @@ exit1(td, rv) /* * Finally, call machine-dependent code to release the remaining * resources including address space, the kernel stack and pcb. - * The address space is released by "vmspace_free(p->p_vmspace)" - * in vm_waitproc(); + * The address space is released by "vmspace_exitfree(p)" in + * vm_waitproc(). 
*/ cpu_exit(td); diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h index 7f24e1109c9b..728b5b6e8857 100644 --- a/sys/vm/vm_extern.h +++ b/sys/vm/vm_extern.h @@ -90,6 +90,7 @@ struct vmspace *vmspace_fork __P((struct vmspace *)); void vmspace_exec __P((struct proc *)); void vmspace_unshare __P((struct proc *)); void vmspace_free __P((struct vmspace *)); +void vmspace_exitfree __P((struct proc *)); void vnode_pager_setsize __P((struct vnode *, vm_ooffset_t)); void vslock __P((caddr_t, u_int)); void vsunlock __P((caddr_t, u_int)); diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index f9fb68cc5b4a..d2ad1e2ef3be 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -305,7 +305,7 @@ vm_waitproc(p) pmap_dispose_proc(p); /* drop per-process resources */ FOREACH_THREAD_IN_PROC(p, td) pmap_dispose_thread(td); - vmspace_free(p->p_vmspace); /* and clean-out the vmspace */ + vmspace_exitfree(p); /* and clean-out the vmspace */ } /* diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 8dc355077805..8b7a230cea7a 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -172,6 +172,7 @@ vmspace_alloc(min, max) vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ vm->vm_refcnt = 1; vm->vm_shm = NULL; + vm->vm_freer = NULL; return (vm); } @@ -189,6 +190,24 @@ vm_init2(void) vm_object_init2(); } +static __inline void +vmspace_dofree( struct vmspace *vm) +{ + CTR1(KTR_VM, "vmspace_free: %p", vm); + /* + * Lock the map, to wait out all other references to it. + * Delete all of the mappings and pages they hold, then call + * the pmap module to reclaim anything left. 
+ */ + vm_map_lock(&vm->vm_map); + (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, + vm->vm_map.max_offset); + vm_map_unlock(&vm->vm_map); + pmap_release(vmspace_pmap(vm)); + vm_map_destroy(&vm->vm_map); + zfree(vmspace_zone, vm); +} + void vmspace_free(struct vmspace *vm) { @@ -197,23 +216,17 @@ vmspace_free(struct vmspace *vm) if (vm->vm_refcnt == 0) panic("vmspace_free: attempt to free already freed vmspace"); - if (--vm->vm_refcnt == 0) { + if (--vm->vm_refcnt == 0) + vmspace_dofree(vm); +} - CTR1(KTR_VM, "vmspace_free: %p", vm); - /* - * Lock the map, to wait out all other references to it. - * Delete all of the mappings and pages they hold, then call - * the pmap module to reclaim anything left. - */ - vm_map_lock(&vm->vm_map); - (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, - vm->vm_map.max_offset); - vm_map_unlock(&vm->vm_map); +void +vmspace_exitfree(struct proc *p) +{ + GIANT_REQUIRED; - pmap_release(vmspace_pmap(vm)); - vm_map_destroy(&vm->vm_map); - zfree(vmspace_zone, vm); - } + if (p == p->p_vmspace->vm_freer) + vmspace_dofree(p->p_vmspace); } /* @@ -2317,7 +2330,7 @@ vmspace_fork(struct vmspace *vm1) vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset); bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, - (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); + (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy); new_map = &vm2->vm_map; /* XXX */ new_map->timestamp = 1; diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index c9831d59fc6a..b376cc1b81d8 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -188,6 +188,8 @@ struct vmspace { caddr_t vm_daddr; /* user virtual address of data XXX */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ caddr_t vm_minsaddr; /* user VA at max stack growth */ +#define vm_endcopy vm_freer + struct proc *vm_freer; /* vm freed on whose behalf */ }; #ifdef _KERNEL