
- Use atomic ops for updating the vmspace's refcnt and exitingcnt.
- Push down Giant into shmexit().  (Giant is acquired only if the vmspace
  contains shm segments.)
- Eliminate the acquisition of Giant from proc_rwmem().
- Reduce the scope of Giant in exit1(), uncovering the destruction of the
  address space.

Alan Cox, 2004-07-27 03:53:41 +00:00
commit 1a276a3f91 (parent 1f1b01c0a5)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=132684
6 changed files with 30 additions and 25 deletions
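
The recurring pattern in these diffs is a lockless compare-and-swap loop for updating vm_refcnt and vm_exitingcnt: reread the counter, attempt the swap, retry until it succeeds, and keep the previously observed value so the caller can tell whether it performed the final decrement. The following is a minimal userland sketch of that pattern, not the kernel code itself: it uses C11 <stdatomic.h> in place of the kernel's atomic_cmpset_int(9), and the fakespace struct and fakespace_release() names are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

struct fakespace {
        atomic_uint refcnt;
};

/*
 * Drop one reference with a CAS retry loop and return the value observed
 * before the decrement, mirroring the kernel's
 * do { refcnt = vm->vm_refcnt; } while (!atomic_cmpset_int(...)) idiom.
 */
static unsigned int
fakespace_release(struct fakespace *fs)
{
        unsigned int refcnt;

        do {
                refcnt = atomic_load(&fs->refcnt);
        } while (!atomic_compare_exchange_weak(&fs->refcnt, &refcnt,
            refcnt - 1));
        return (refcnt);
}

int
main(void)
{
        struct fakespace fs = { 2 };

        /* Only the caller that observed 1 goes on to tear the space down. */
        if (fakespace_release(&fs) == 1)
                printf("last reference: would run vmspace teardown\n");
        if (fakespace_release(&fs) == 1)
                printf("last reference: would run vmspace teardown\n");
        return (0);
}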

sys/kern/kern_exit.c

@@ -114,6 +114,7 @@ exit1(struct thread *td, int rv)
         struct ucred *tracecred;
 #endif
         struct plimit *plim;
+        int refcnt;
 
         /*
          * Drop Giant if caller has it.  Eventually we should warn about
@@ -254,7 +255,6 @@ exit1(struct thread *td, int rv)
         }
         mtx_unlock(&ppeers_lock);
 
-        mtx_lock(&Giant);
         /* The next two chunks should probably be moved to vmspace_exit. */
         vm = p->p_vmspace;
         /*
@@ -272,8 +272,11 @@ exit1(struct thread *td, int rv)
          * by vmspace_exit() (which decrements exitingcnt) cleans up the
          * remainder.
          */
-        ++vm->vm_exitingcnt;
-        if (--vm->vm_refcnt == 0) {
+        atomic_add_int(&vm->vm_exitingcnt, 1);
+        do
+                refcnt = vm->vm_refcnt;
+        while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
+        if (refcnt == 1) {
                 shmexit(vm);
                 pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
                     vm_map_max(&vm->vm_map));
@@ -281,6 +284,7 @@ exit1(struct thread *td, int rv)
                     vm_map_max(&vm->vm_map));
         }
 
+        mtx_lock(&Giant);
         sx_xlock(&proctree_lock);
         if (SESS_LEADER(p)) {
                 struct session *sp;

sys/kern/sys_process.c

@@ -153,24 +153,21 @@ proc_rwmem(struct proc *p, struct uio *uio)
         vm_object_t backing_object, object = NULL;
         vm_offset_t pageno = 0;         /* page number */
         vm_prot_t reqprot;
-        int error, writing;
+        int error, refcnt, writing;
 
-        mtx_lock(&Giant);
         /*
          * if the vmspace is in the midst of being deallocated or the
          * process is exiting, don't try to grab anything.  The page table
          * usage in that process can be messed up.
          */
         vm = p->p_vmspace;
-        if ((p->p_flag & P_WEXIT)) {
-                mtx_unlock(&Giant);
+        if ((p->p_flag & P_WEXIT))
                 return (EFAULT);
-        }
-        if (vm->vm_refcnt < 1) {
-                mtx_unlock(&Giant);
-                return (EFAULT);
-        }
-        ++vm->vm_refcnt;
+        do {
+                if ((refcnt = vm->vm_refcnt) < 1)
+                        return (EFAULT);
+        } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
+
         /*
          * The map we want...
          */
@@ -278,7 +275,6 @@ proc_rwmem(struct proc *p, struct uio *uio)
         } while (error == 0 && uio->uio_resid > 0);
 
         vmspace_free(vm);
-        mtx_unlock(&Giant);
 
         return (error);
 }
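
The proc_rwmem() change above takes its vmspace reference with the same loop, but it refuses to take one once it observes vm_refcnt below 1, so a vmspace already in teardown is never resurrected and the caller gets EFAULT instead. A minimal sketch of that guarded acquire, again with C11 atomics and hypothetical userland names rather than the kernel's atomic_cmpset_int(9):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Retry the CAS until it succeeds, but bail out once the observed count
 * has dropped below 1, i.e. the object is already being deallocated.
 */
static bool
fakespace_try_hold(atomic_uint *refcnt)
{
        unsigned int old;

        do {
                old = atomic_load(refcnt);
                if (old < 1)
                        return (false); /* caller would return EFAULT */
        } while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
        return (true);
}

int
main(void)
{
        atomic_uint live = 1, dying = 0;

        printf("live:  %s\n", fakespace_try_hold(&live) ? "held" : "EFAULT");
        printf("dying: %s\n", fakespace_try_hold(&dying) ? "held" : "EFAULT");
        return (0);
}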

sys/kern/sysv_shm.c

@@ -822,14 +822,14 @@ shmexit_myhook(struct vmspace *vm)
         struct shmmap_state *base, *shm;
         int i;
 
-        GIANT_REQUIRED;
-
         if ((base = vm->vm_shm) != NULL) {
                 vm->vm_shm = NULL;
+                mtx_lock(&Giant);
                 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                         if (shm->shmid != -1)
                                 shm_delete_mapping(vm, shm);
                 }
+                mtx_unlock(&Giant);
                 free(base, M_SHM);
         }
 }

sys/kern/vfs_aio.c

@@ -868,7 +868,7 @@ aio_daemon(void *uproc)
                  * refer to it.
                  */
                 mycp->p_vmspace = userp->p_vmspace;
-                mycp->p_vmspace->vm_refcnt++;
+                atomic_add_int(&mycp->p_vmspace->vm_refcnt, 1);
 
                 /* Activate the new mapping. */
                 pmap_activate(FIRST_THREAD_IN_PROC(mycp));

sys/vm/vm_glue.c

@@ -638,7 +638,7 @@ vm_forkproc(td, p2, td2, flags)
 
         if (flags & RFMEM) {
                 p2->p_vmspace = p1->p_vmspace;
-                p1->p_vmspace->vm_refcnt++;
+                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
         }
 
         while (vm_page_count_severe()) {
@@ -952,7 +952,7 @@ int action;
                         vm = p->p_vmspace;
                         KASSERT(vm != NULL,
                             ("swapout_procs: a process has no address space"));
-                        ++vm->vm_refcnt;
+                        atomic_add_int(&vm->vm_refcnt, 1);
                         PROC_UNLOCK(p);
                         if (!vm_map_trylock(&vm->vm_map))
                                 goto nextproc1;

sys/vm/vm_map.c

@@ -311,13 +311,15 @@ vmspace_dofree(struct vmspace *vm)
 void
 vmspace_free(struct vmspace *vm)
 {
-        GIANT_REQUIRED;
+        int refcnt;
 
         if (vm->vm_refcnt == 0)
                 panic("vmspace_free: attempt to free already freed vmspace");
 
-        --vm->vm_refcnt;
-        if (vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
+        do
+                refcnt = vm->vm_refcnt;
+        while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
+        if (refcnt == 1 && vm->vm_exitingcnt == 0)
                 vmspace_dofree(vm);
 }
 
@@ -325,8 +327,8 @@ void
 vmspace_exitfree(struct proc *p)
 {
         struct vmspace *vm;
+        int exitingcnt;
 
-        GIANT_REQUIRED;
-
         vm = p->p_vmspace;
         p->p_vmspace = NULL;
@@ -341,8 +343,11 @@
          * The last wait on the exiting child's vmspace will clean up
          * the remainder of the vmspace.
          */
-        --vm->vm_exitingcnt;
-        if (vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
+        do
+                exitingcnt = vm->vm_exitingcnt;
+        while (!atomic_cmpset_int(&vm->vm_exitingcnt, exitingcnt,
+            exitingcnt - 1));
+        if (vm->vm_refcnt == 0 && exitingcnt == 1)
                 vmspace_dofree(vm);
 }