diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index a40ea11b4507..0aec8077b4a6 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -241,40 +241,14 @@ void
 cpu_exit(p)
 	register struct proc *p;
 {
+	alpha_fpstate_drop(p);
-
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_switch();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
-
-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
 }

 /*
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 4fc91dd5ea01..53cedff64763 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -268,38 +268,12 @@ cpu_exit(p)
 		reset_dbregs();
 		pcb->pcb_flags &= ~PCB_DBREGS;
 	}
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_throw();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
-
-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
 }

 /*
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 4fc91dd5ea01..53cedff64763 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -268,38 +268,12 @@ cpu_exit(p)
 		reset_dbregs();
 		pcb->pcb_flags &= ~PCB_DBREGS;
 	}
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_throw();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
-
-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
 }

 /*
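The three hunks above give alpha, amd64, and i386 the same treatment: cpu_exit() keeps only the work that must happen while the process can still run (alpha drops its FPU state; the x86 files reset the debug registers just above), and cpu_wait() becomes empty, since pmap_dispose_proc() and vmspace_free() move to the MI vm_waitproc() in sys/vm/vm_glue.c at the end of this patch. As a hedged sketch, not part of the patch, the MD contract for a hypothetical new port shrinks to roughly the shape below; xxx_fpstate_drop() is a made-up stand-in for whatever per-process machine state a real port has to release (cf. alpha_fpstate_drop() above and ia64_fpstate_drop() below):

	void
	cpu_exit(struct proc *p)
	{

		/* Release machine state the dying process may still hold. */
		xxx_fpstate_drop(p);	/* hypothetical per-port hook */
	}

	void
	cpu_wait(struct proc *p)
	{

		/* Empty: pmap/vmspace teardown now happens in MI vm_waitproc(). */
	}
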
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index d7443a0604f0..3774212e68b6 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -284,40 +284,23 @@ void
 cpu_exit(p)
 	register struct proc *p;
 {
+	ia64_fpstate_drop(p);
-
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_switch();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
+}

-	/* drop per-process resources */
-	pmap_dispose_proc(p);
+/* Temporary helper */
+void
+cpu_throw(void)
+{

-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
+	cpu_switch();
+	panic("cpu_throw() didn't");
 }

 /*
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 8022cb59c048..812b20ec678f 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -67,6 +68,7 @@
 #include
 #include
+#include
 #include
 #include
 #include

@@ -380,13 +382,30 @@
 	/*
 	 * Finally, call machine-dependent code to release the remaining
 	 * resources including address space, the kernel stack and pcb.
-	 * The address space is released by "vmspace_free(p->p_vmspace)";
-	 * This is machine-dependent, as we may have to change stacks
-	 * or ensure that the current one isn't reallocated before we
-	 * finish.  cpu_exit will end with a call to cpu_switch(), finishing
-	 * our execution (pun intended).
+	 * The address space is released by "vmspace_free(p->p_vmspace)"
+	 * in vm_waitproc();
 	 */
 	cpu_exit(p);
+
+	PROC_LOCK(p);
+	mtx_lock_spin(&sched_lock);
+	while (mtx_owned(&Giant))
+		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
+
+	/*
+	 * We have to wait until after releasing all locks before
+	 * changing p_stat. If we block on a mutex then we will be
+	 * back at SRUN when we resume and our parent will never
+	 * harvest us.
+	 */
+	p->p_stat = SZOMB;
+
+	wakeup(p->p_pptr);
+	PROC_UNLOCK_NOSWITCH(p);
+
+	cnt.v_swtch++;
+	cpu_throw();
+	panic("exit1");
 }

 #ifdef COMPAT_43
@@ -571,11 +590,11 @@
 	}

 	/*
-	 * Give machine-dependent layer a chance
+	 * Give vm and machine-dependent layer a chance
 	 * to free anything that cpu_exit couldn't
 	 * release while still running in process context.
 	 */
-	cpu_wait(p);
+	vm_waitproc(p);
 	mtx_destroy(&p->p_mtx);
 	zfree(proc_zone, p);
 	nprocs--;
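The kern_exit.c hunk above is the heart of the change: the zombie transition (release Giant, set SZOMB under sched_lock, wake the parent, switch away for the last time via cpu_throw()) now lives once in MI exit1() instead of once per architecture, and wait1() reaps through the new vm_waitproc(). Nothing changes from user space; this small program (illustrative only, not part of the patch) walks exactly those kernel paths: fork(2) enters fork1() and vm_forkproc(), the child's _exit(2) enters exit1(), and the parent's waitpid(2) enters wait1(), which frees the child's pmap and vmspace through vm_waitproc():

	#include <sys/types.h>
	#include <sys/wait.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		pid_t pid;
		int status;

		pid = fork();		/* kernel: fork1() -> vm_forkproc() */
		if (pid == -1) {
			perror("fork");
			return (1);
		}
		if (pid == 0)
			_exit(7);	/* kernel: exit1() marks us SZOMB,
					   wakes the parent, cpu_throw()s */
		/* kernel: wait1() -> vm_waitproc() reclaims the child */
		if (waitpid(pid, &status, 0) == -1) {
			perror("waitpid");
			return (1);
		}
		printf("child %ld exited with %d\n",
		    (long)pid, WEXITSTATUS(status));
		return (0);
	}
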
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index dea8ff028046..af154c3beeae 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -235,7 +235,7 @@
 	 * certain parts of a process from itself.
 	 */
 	if ((flags & RFPROC) == 0) {
-		vm_fork(p1, 0, flags);
+		vm_forkproc(p1, 0, flags);

 		/*
 		 * Close all file descriptors.
@@ -412,7 +412,7 @@
 	/*
 	 * Duplicate sub-structures as needed.
 	 * Increase reference counts on shared objects.
-	 * The p_stats and p_sigacts substructs are set in vm_fork.
+	 * The p_stats and p_sigacts substructs are set in vm_forkproc.
 	 */
 	p2->p_flag = 0;
 	mtx_lock_spin(&sched_lock);
@@ -461,7 +461,7 @@
 		PROC_LOCK(p1);
 		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
 		p2->p_procsig->ps_refcnt = 1;
-		p2->p_sigacts = NULL;	/* finished in vm_fork() */
+		p2->p_sigacts = NULL;	/* finished in vm_forkproc() */
 	}
 	if (flags & RFLINUXTHPN)
 		p2->p_sigparent = SIGUSR1;
@@ -573,7 +573,7 @@
 	 * Finish creating the child process.  It will return via a different
 	 * execution path later.  (ie: directly into user mode)
 	 */
-	vm_fork(p1, p2, flags);
+	vm_forkproc(p1, p2, flags);

 	if (flags == (RFFDG | RFPROC)) {
 		cnt.v_forks++;
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index e811fc891b50..630f9bf9f0d6 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -159,38 +159,21 @@ void
 cpu_exit(p)
 	register struct proc *p;
 {
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_switch();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
+}

-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
+/* Temporary helper */
+void
+cpu_throw(void)
+{
+
+	cpu_switch();
+	panic("cpu_throw() didn't");
 }

 /*
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index e811fc891b50..630f9bf9f0d6 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -159,38 +159,21 @@ void
 cpu_exit(p)
 	register struct proc *p;
 {
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_switch();
-	panic("cpu_exit");
 }

 void
 cpu_wait(p)
 	struct proc *p;
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
+}

-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
+/* Temporary helper */
+void
+cpu_throw(void)
+{
+
+	cpu_switch();
+	panic("cpu_throw() didn't");
 }

 /*
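The powerpc hunks above (and the ia64 one earlier) add the same temporary cpu_throw() wrapper: i386, the identical amd64 copy, and sparc64 already ended cpu_exit() with a native cpu_throw(), and MI exit1() now ends in it unconditionally. The wrapper works because a process marked SZOMB is never put back on a run queue, so the embedded cpu_switch() never switches back here, and the panic() fires only if that invariant breaks. Note that the sys/sys/proc.h hunk further below merely drops __dead2 from cpu_exit(), which now returns to exit1(); a matching prototype for cpu_throw() is implied but not visible in this patch. A hedged sketch of the resulting declarations (the cpu_throw() line is an assumption, not taken from this diff):

	void	cpu_exit __P((struct proc *));		/* now returns to exit1() */
	void	exit1 __P((struct proc *, int)) __dead2;	/* still never returns */
	void	cpu_throw __P((void)) __dead2;		/* assumed, not in this diff */
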
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 2365c320527d..addec1b2b74e 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -63,30 +63,9 @@
 #include
 #include

-/* XXX: it seems that all that is in here should really be MI... */
 void
 cpu_exit(struct proc *p)
 {
-
-	PROC_LOCK(p);
-	mtx_lock_spin(&sched_lock);
-	while (mtx_owned(&Giant))
-		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
-
-	/*
-	 * We have to wait until after releasing all locks before
-	 * changing p_stat. If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
-	 */
-	p->p_stat = SZOMB;
-
-	wakeup(p->p_pptr);
-	PROC_UNLOCK_NOSWITCH(p);
-
-	cnt.v_swtch++;
-	cpu_throw();
-	panic("cpu_exit");
 }

 /*
@@ -178,13 +157,6 @@ cpu_set_fork_handler(struct proc *p, void (*func)(void *), void *arg)
 void
 cpu_wait(struct proc *p)
 {
-	GIANT_REQUIRED;
-
-	/* drop per-process resources */
-	pmap_dispose_proc(p);
-
-	/* and clean-out the vmspace */
-	vmspace_free(p->p_vmspace);
 }

 void
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 9de9392ef724..0a321a519872 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -501,7 +501,7 @@ void	updatepri __P((struct proc *));
 void	userret __P((struct proc *, struct trapframe *, u_int));
 void	maybe_resched __P((struct proc *));

-void	cpu_exit __P((struct proc *)) __dead2;
+void	cpu_exit __P((struct proc *));
 void	exit1 __P((struct proc *, int)) __dead2;
 void	cpu_fork __P((struct proc *, struct proc *, int));
 void	cpu_set_fork_handler __P((struct proc *, void (*)(void *), void *));
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 307192f4a278..9ff01912c6c5 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -80,7 +80,8 @@ void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t
 void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
 int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
 int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
-void vm_fork __P((struct proc *, struct proc *, int));
+void vm_forkproc __P((struct proc *, struct proc *, int));
+void vm_waitproc __P((struct proc *));
 int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t));
 vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
 void vm_set_page_size __P((void));
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 8ac0ddeb3023..56381752b448 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -209,7 +209,7 @@ vsunlock(addr, len)
  * to user mode to avoid stack copying and relocation problems.
  */
 void
-vm_fork(p1, p2, flags)
+vm_forkproc(p1, p2, flags)
 	struct proc *p1, *p2;
 	int flags;
 {
@@ -285,6 +285,22 @@
 	cpu_fork(p1, p2, flags);
 }

+/*
+ * Called after process has been wait(2)'ed apon and is being reaped.
+ * The idea is to reclaim resources that we could not reclaim while
+ * the process was still executing.
+ */
+void
+vm_waitproc(p)
+	struct proc *p;
+{
+
+	GIANT_REQUIRED;
+	cpu_wait(p);
+	pmap_dispose_proc(p);		/* drop per-process resources */
+	vmspace_free(p->p_vmspace);	/* and clean-out the vmspace */
+}
+
 /*
  * Set default limits for VM system.
  * Called for proc 0, and then inherited by all others.
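Finally, the vm_glue.c hunks complete the rename and the split: vm_forkproc() is the old vm_fork(), named to reflect that it may operate on the current process when rfork(2) is called without RFPROC (the (flags & RFPROC) == 0 branch of fork1() above passes vm_forkproc(p1, 0, flags)), and vm_waitproc() gathers cpu_wait(), pmap_dispose_proc(), and vmspace_free() behind one MI entry point, still under Giant. For illustration only (FreeBSD-specific, not part of the patch), this is the user-visible shape of that RFPROC-less path; rfork(RFCFDG) applies to the caller itself and closes every file descriptor it holds:

	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{

		printf("before rfork(RFCFDG): stdout is open\n");
		if (rfork(RFCFDG) == -1) {	/* no RFPROC: modify self */
			perror("rfork");
			return (1);
		}
		/* All of the caller's descriptors, stdout included, are now
		 * closed, so this message goes nowhere. */
		printf("after rfork(RFCFDG)\n");
		return (0);
	}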