
Rip some heavily duplicated code out of cpu_wait() and cpu_exit() and move
it to the MI area.  KSE touched cpu_wait(), which had the same change
replicated five ways, once for each platform.  Now it can just be done once.
The only MD parts seemed to be fpu state cleanup and things like vm86
cleanup on x86.  The rest was identical.
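
After the change, each platform's cpu_exit() keeps only its MD cleanup;
for example (as in the alpha diff below):

	void
	cpu_exit(p)
		register struct proc *p;
	{
		alpha_fpstate_drop(p);	/* fpu state cleanup is all that is left */
	}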

XXX: ia64 and powerpc did not have cpu_throw(), so I've put a functional
stub in place.
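
The stub is functionally adequate: cpu_switch() saves context into the
exiting process's pcb, which is never switched back to, while a real
cpu_throw() would merely skip that save.  As committed below, lightly
annotated:

	/* Temporary helper */
	void
	cpu_throw(void)
	{
		cpu_switch();			/* saved context is never resumed */
		panic("cpu_throw() didn't");	/* cpu_switch() must not return */
	}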

Reviewed by:	jake, tmm, dillon
Peter Wemm 2001-09-10 04:28:58 +00:00
parent e11e07928b
commit eb30c1c0b9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=83276
12 changed files with 75 additions and 196 deletions
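
In outline, the change pairs one MI exit path with one MI reap path.  A
sketch, using the names from the diffs below (not the committed code
verbatim):

	/* exiting process, MI side: the new tail of kern_exit.c:exit1() */
	cpu_exit(p);		/* MD hook: fpu state / vm86 cleanup only */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	while (mtx_owned(&Giant))
		mtx_unlock_flags(&Giant, MTX_NOSWITCH);
	p->p_stat = SZOMB;	/* set only after any lock we could block on is gone */
	wakeup(p->p_pptr);	/* let the parent harvest us */
	PROC_UNLOCK_NOSWITCH(p);
	cnt.v_swtch++;
	cpu_throw();		/* switch away; this process never runs again */

	/* reaping parent, MI side: kern_exit.c:wait1() */
	vm_waitproc(p);		/* cpu_wait(), pmap_dispose_proc(), vmspace_free() */
	mtx_destroy(&p->p_mtx);
	zfree(proc_zone, p);

The split exists because a process cannot release its own kernel stack or
pmap while still running on them; those are reclaimed from the parent's
context in vm_waitproc(), after the child has switched away for the last
time.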

View File

@@ -241,40 +241,14 @@ void
cpu_exit(p)
register struct proc *p;
{
alpha_fpstate_drop(p);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_switch();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/*

View File

@@ -268,38 +268,12 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_throw();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/*

View File

@@ -268,38 +268,12 @@ cpu_exit(p)
reset_dbregs();
pcb->pcb_flags &= ~PCB_DBREGS;
}
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_throw();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/*

View File

@@ -284,40 +284,23 @@ void
cpu_exit(p)
register struct proc *p;
{
ia64_fpstate_drop(p);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_switch();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/* Temporary helper */
void
cpu_throw(void)
{
cpu_switch();
panic("cpu_throw() didn't");
}
/*

View File

@@ -54,6 +54,7 @@
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
@@ -67,6 +68,7 @@
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
@@ -380,13 +382,30 @@ exit1(p, rv)
/*
* Finally, call machine-dependent code to release the remaining
* resources including address space, the kernel stack and pcb.
* The address space is released by "vmspace_free(p->p_vmspace)";
* This is machine-dependent, as we may have to change stacks
* or ensure that the current one isn't reallocated before we
* finish. cpu_exit will end with a call to cpu_switch(), finishing
* our execution (pun intended).
* The address space is released by "vmspace_free(p->p_vmspace)"
* in vm_waitproc();
*/
cpu_exit(p);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_throw();
panic("exit1");
}
#ifdef COMPAT_43
@@ -571,11 +590,11 @@ wait1(q, uap, compat)
}
/*
* Give machine-dependent layer a chance
* Give vm and machine-dependent layer a chance
* to free anything that cpu_exit couldn't
* release while still running in process context.
*/
cpu_wait(p);
vm_waitproc(p);
mtx_destroy(&p->p_mtx);
zfree(proc_zone, p);
nprocs--;

View File

@@ -235,7 +235,7 @@ fork1(p1, flags, procp)
* certain parts of a process from itself.
*/
if ((flags & RFPROC) == 0) {
vm_fork(p1, 0, flags);
vm_forkproc(p1, 0, flags);
/*
* Close all file descriptors.
@@ -412,7 +412,7 @@ fork1(p1, flags, procp)
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
* The p_stats and p_sigacts substructs are set in vm_fork.
* The p_stats and p_sigacts substructs are set in vm_forkproc.
*/
p2->p_flag = 0;
mtx_lock_spin(&sched_lock);
@@ -461,7 +461,7 @@ fork1(p1, flags, procp)
PROC_LOCK(p1);
bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
p2->p_procsig->ps_refcnt = 1;
p2->p_sigacts = NULL; /* finished in vm_fork() */
p2->p_sigacts = NULL; /* finished in vm_forkproc() */
}
if (flags & RFLINUXTHPN)
p2->p_sigparent = SIGUSR1;
@@ -573,7 +573,7 @@ fork1(p1, flags, procp)
* Finish creating the child process. It will return via a different
* execution path later. (ie: directly into user mode)
*/
vm_fork(p1, p2, flags);
vm_forkproc(p1, p2, flags);
if (flags == (RFFDG | RFPROC)) {
cnt.v_forks++;

View File

@@ -159,38 +159,21 @@ void
cpu_exit(p)
register struct proc *p;
{
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_switch();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/* Temporary helper */
void
cpu_throw(void)
{
cpu_switch();
panic("cpu_throw() didn't");
}
/*

View File

@@ -159,38 +159,21 @@ void
cpu_exit(p)
register struct proc *p;
{
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_switch();
panic("cpu_exit");
}
void
cpu_wait(p)
struct proc *p;
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
/* Temporary helper */
void
cpu_throw(void)
{
cpu_switch();
panic("cpu_throw() didn't");
}
/*

View File

@@ -63,30 +63,9 @@
#include <machine/md_var.h>
#include <machine/tstate.h>
/* XXX: it seems that all that is in here should really be MI... */
void
cpu_exit(struct proc *p)
{
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK_NOSWITCH(p);
cnt.v_swtch++;
cpu_throw();
panic("cpu_exit");
}
/*
@@ -178,13 +157,6 @@ cpu_set_fork_handler(struct proc *p, void (*func)(void *), void *arg)
void
cpu_wait(struct proc *p)
{
GIANT_REQUIRED;
/* drop per-process resources */
pmap_dispose_proc(p);
/* and clean-out the vmspace */
vmspace_free(p->p_vmspace);
}
void

View File

@@ -501,7 +501,7 @@ void updatepri __P((struct proc *));
void userret __P((struct proc *, struct trapframe *, u_int));
void maybe_resched __P((struct proc *));
void cpu_exit __P((struct proc *)) __dead2;
void cpu_exit __P((struct proc *));
void exit1 __P((struct proc *, int)) __dead2;
void cpu_fork __P((struct proc *, struct proc *, int));
void cpu_set_fork_handler __P((struct proc *, void (*)(void *), void *));

View File

@@ -80,7 +80,8 @@ void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_fork __P((struct proc *, struct proc *, int));
void vm_forkproc __P((struct proc *, struct proc *, int));
void vm_waitproc __P((struct proc *));
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t));
vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t));
void vm_set_page_size __P((void));

View File

@@ -209,7 +209,7 @@ vsunlock(addr, len)
* to user mode to avoid stack copying and relocation problems.
*/
void
vm_fork(p1, p2, flags)
vm_forkproc(p1, p2, flags)
struct proc *p1, *p2;
int flags;
{
@@ -285,6 +285,22 @@ vm_fork(p1, p2, flags)
cpu_fork(p1, p2, flags);
}
/*
* Called after the process has been wait(2)'ed upon and is being reaped.
* The idea is to reclaim resources that we could not reclaim while
* the process was still executing.
*/
void
vm_waitproc(p)
struct proc *p;
{
GIANT_REQUIRED;
cpu_wait(p);
pmap_dispose_proc(p); /* drop per-process resources */
vmspace_free(p->p_vmspace); /* and clean-out the vmspace */
}
/*
* Set default limits for VM system.
* Called for proc 0, and then inherited by all others.