Put the scheduler, vmdaemon, and pagedaemon kthreads back under Giant for now.
The proc locking isn't actually safe yet and won't be until the proc locking
work is finished.
This commit is contained in:
John Baldwin 2001-06-20 00:48:20 +00:00
parent 29dfd70b01
commit 69a78d4666
2 changed files with 1 addition and 19 deletions

View File

@ -374,7 +374,6 @@ scheduler(dummy)
int ppri;
mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
mtx_unlock(&Giant);
loop:
mtx_lock(&vm_mtx);
@ -426,11 +425,9 @@ loop:
/*
* We would like to bring someone in. (only if there is space).
*/
mtx_lock(&Giant);
PROC_LOCK(p);
faultin(p);
PROC_UNLOCK(p);
mtx_unlock(&Giant);
mtx_lock_spin(&sched_lock);
p->p_swtime = 0;
mtx_unlock_spin(&sched_lock);

View File

@ -1349,6 +1349,7 @@ vm_pageout()
{
int pass;
mtx_lock(&Giant);
mtx_lock(&vm_mtx);
/*
@ -1465,11 +1466,7 @@ vm_pageout()
if (vm_pages_needed)
cnt.v_pdwakeups++;
splx(s);
mtx_unlock(&vm_mtx);
mtx_lock(&Giant);
mtx_lock(&vm_mtx);
vm_pageout_scan(pass);
mtx_unlock(&Giant);
vm_pageout_deficit = 0;
}
}
@ -1500,13 +1497,8 @@ vm_daemon()
{
struct proc *p;
#ifndef rlimlocked
mtx_lock(&Giant);
#endif
while (TRUE) {
#ifdef rlimlocked
mtx_lock(&Giant);
#endif
mtx_lock(&vm_mtx);
msleep(&vm_daemon_needed, &vm_mtx, PPAUSE, "psleep", 0);
if (vm_pageout_req_swapout) {
@ -1515,13 +1507,6 @@ vm_daemon()
vm_pageout_req_swapout = 0;
}
mtx_unlock(&vm_mtx);
#ifdef rlimlocked
/*
* XXX: we can't do this yet because Giant still protects
* the per-process resource limits that we check below.
*/
mtx_unlock(&Giant);
#endif
/*
* scan the processes for exceeding their rlimits or if
* process is swapped out -- deactivate pages