Mirror of https://git.FreeBSD.org/src.git (synced 2025-01-11 14:10:34 +00:00)
- Remove asleep(), await(), and M_ASLEEP.
- Callers of asleep() and await() have been converted to calling tsleep().
  The only caller outside of M_ASLEEP was the ata driver, which called both
  asleep() and await() with spl-raised, so there was no need for the
  asleep() and await() pair.
- M_ASLEEP was unused.

Reviewed by:    jasone, peter
parent 043b27b450
commit 8ec48c6dbf
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=81397
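
The conversion applied throughout the diff below is mechanical: where a driver
previously registered a wait channel with asleep() and then blocked in await(),
it now performs a single tsleep() on the same channel. That is safe as long as
the wakeup cannot fire before tsleep() is reached, which the commit message
notes was the case for the ata driver (it held spl raised across the window).
The following is a minimal sketch of the before/after pattern, not code from
the tree: the function names, the `sc` argument, and the "drvcmd" wmesg are
illustrative only, and the "old" half only builds against a pre-commit tree,
since this commit removes asleep() and await().

/*
 * Sketch only: assumes a FreeBSD 4.x/5.x-era kernel compilation environment;
 * this is not a standalone userland program.
 */
#include <sys/param.h>
#include <sys/systm.h>          /* tsleep()/msleep(), wakeup(), PRIBIO */
#include <sys/kernel.h>         /* hz */

/* Old pattern: queue on the sleep channel early, block later in await(). */
static int
drv_wait_old(void *sc)
{
        asleep(sc, PRIBIO, "drvcmd", 10 * hz);  /* stays runnable, no block yet */
        /* ... program the hardware; its interrupt handler does wakeup(sc) ... */
        return (await(PRIBIO, 10 * hz));        /* 0 on wakeup, EWOULDBLOCK on timeout */
}

/* New pattern: a single tsleep() on the same channel does the blocking. */
static int
drv_wait_new(void *sc)
{
        /* ... program the hardware; its interrupt handler does wakeup(sc) ... */
        return (tsleep(sc, PRIBIO, "drvcmd", 10 * hz));
}
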
@@ -970,14 +970,13 @@ ata_command(struct ata_softc *scp, int device, u_int8_t command,
     switch (flags) {
     case ATA_WAIT_INTR:
         scp->active |= ATA_WAIT_INTR;
-        asleep((caddr_t)scp, PRIBIO, "atacmd", 10 * hz);
         ATA_OUTB(scp->r_io, ATA_CMD, command);
 
         /* enable interrupt */
         if (scp->flags & ATA_QUEUED)
             ATA_OUTB(scp->r_altio, ATA_ALTSTAT, ATA_A_4BIT);
 
-        if (await(PRIBIO, 10 * hz)) {
+        if (tsleep((caddr_t)scp, PRIBIO, "atacmd", 10 * hz) != 0) {
             ata_printf(scp, device, "ata_command: timeout waiting for intr\n");
             scp->active &= ~ATA_WAIT_INTR;
             error = -1;
@@ -199,10 +199,6 @@ atapi_queue_cmd(struct atapi_softc *atp, int8_t *ccb, caddr_t data,
 
     s = splbio();
 
-    /* if not using callbacks, prepare to sleep for this request */
-    if (!callback)
-        asleep((caddr_t)request, PRIBIO, "atprq", 0);
-
     /* append onto controller queue and try to start controller */
 #ifdef ATAPI_DEBUG
     ata_printf(atp->controller, atp->unit, "queueing %s ",
@@ -222,7 +218,7 @@ atapi_queue_cmd(struct atapi_softc *atp, int8_t *ccb, caddr_t data,
     }
 
     /* wait for request to complete */
-    await(PRIBIO, 0);
+    tsleep((caddr_t)request, PRIBIO, "atprq", 0);
     splx(s);
     error = request->error;
     if (error)
@@ -403,12 +403,6 @@ msleep(ident, mtx, priority, wmesg, timo)
 
     KASSERT(p != NULL, ("msleep1"));
     KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep"));
-    /*
-     * Process may be sitting on a slpque if asleep() was called, remove
-     * it before re-adding.
-     */
-    if (p->p_wchan != NULL)
-        unsleep(p);
 
     p->p_wchan = ident;
     p->p_wmesg = wmesg;
@@ -487,181 +481,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 }
 
-/*
- * asleep() - async sleep call. Place process on wait queue and return
- * immediately without blocking. The process stays runnable until await()
- * is called. If ident is NULL, remove process from wait queue if it is still
- * on one.
- *
- * Only the most recent sleep condition is effective when making successive
- * calls to asleep() or when calling msleep().
- *
- * The timeout, if any, is not initiated until await() is called. The sleep
- * priority, signal, and timeout is specified in the asleep() call but may be
- * overriden in the await() call.
- *
- * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
- */
-
-int
-asleep(void *ident, int priority, const char *wmesg, int timo)
-{
-    struct proc *p = curproc;
-
-    /*
-     * Remove preexisting wait condition (if any) and place process
-     * on appropriate slpque, but do not put process to sleep.
-     */
-
-    mtx_lock_spin(&sched_lock);
-
-    if (p->p_wchan != NULL)
-        unsleep(p);
-
-    if (ident) {
-        p->p_wchan = ident;
-        p->p_wmesg = wmesg;
-        p->p_slptime = 0;
-        p->p_asleep.as_priority = priority;
-        p->p_asleep.as_timo = timo;
-        TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
-    }
-
-    mtx_unlock_spin(&sched_lock);
-
-    return(0);
-}
-
-/*
- * await() - wait for async condition to occur. The process blocks until
- * wakeup() is called on the most recent asleep() address. If wakeup is called
- * prior to await(), await() winds up being a NOP.
- *
- * If await() is called more then once (without an intervening asleep() call),
- * await() is still effectively a NOP but it calls mi_switch() to give other
- * processes some cpu before returning. The process is left runnable.
- *
- * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
- */
-
-int
-await(int priority, int timo)
-{
-    struct proc *p = curproc;
-    int rval = 0;
-
-    WITNESS_SLEEP(0, NULL);
-    mtx_lock_spin(&sched_lock);
-    if (cold || panicstr) {
-        /*
-         * After a panic, or during autoconfiguration,
-         * just give interrupts a chance, then just return;
-         * don't run any other procs or panic below,
-         * in case this is the idle process and already asleep.
-         */
-        mtx_unlock_spin(&sched_lock);
-        return (0);
-    }
-    DROP_GIANT_NOSWITCH();
-
-    if (p->p_wchan != NULL) {
-        int sig;
-        int catch;
-
-#ifdef KTRACE
-        if (p && KTRPOINT(p, KTR_CSW))
-            ktrcsw(p->p_tracep, 1, 0);
-#endif
-        /*
-         * The call to await() can override defaults specified in
-         * the original asleep().
-         */
-        if (priority < 0)
-            priority = p->p_asleep.as_priority;
-        if (timo < 0)
-            timo = p->p_asleep.as_timo;
-
-        /*
-         * Install timeout
-         */
-
-        if (timo)
-            callout_reset(&p->p_slpcallout, timo, endtsleep, p);
-
-        sig = 0;
-        catch = priority & PCATCH;
-
-        if (catch) {
-            p->p_sflag |= PS_SINTR;
-            mtx_unlock_spin(&sched_lock);
-            PROC_LOCK(p);
-            sig = CURSIG(p);
-            mtx_lock_spin(&sched_lock);
-            PROC_UNLOCK_NOSWITCH(p);
-            if (sig != 0) {
-                if (p->p_wchan)
-                    unsleep(p);
-            } else if (p->p_wchan == NULL)
-                catch = 0;
-        }
-        if (p->p_wchan != NULL) {
-            p->p_stat = SSLEEP;
-            p->p_stats->p_ru.ru_nvcsw++;
-            mi_switch();
-        }
-        KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
-        p->p_sflag &= ~PS_SINTR;
-        if (p->p_sflag & PS_TIMEOUT) {
-            p->p_sflag &= ~PS_TIMEOUT;
-            if (sig == 0)
-                rval = EWOULDBLOCK;
-        } else if (timo)
-            callout_stop(&p->p_slpcallout);
-        mtx_unlock_spin(&sched_lock);
-        if (rval == 0 && catch) {
-            PROC_LOCK(p);
-            if (sig != 0 || (sig = CURSIG(p))) {
-                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
-                    rval = EINTR;
-                else
-                    rval = ERESTART;
-            }
-            PROC_UNLOCK(p);
-        }
-#ifdef KTRACE
-        mtx_lock(&Giant);
-        if (KTRPOINT(p, KTR_CSW))
-            ktrcsw(p->p_tracep, 0, 0);
-        mtx_unlock(&Giant);
-#endif
-    } else {
-        /*
-         * If as_priority is 0, await() has been called without an
-         * intervening asleep(). We are still effectively a NOP,
-         * but we call mi_switch() for safety.
-         */
-
-        if (p->p_asleep.as_priority == 0) {
-            p->p_stats->p_ru.ru_nvcsw++;
-            mi_switch();
-        }
-        mtx_unlock_spin(&sched_lock);
-    }
-
-    /*
-     * clear p_asleep.as_priority as an indication that await() has been
-     * called. If await() is called again without an intervening asleep(),
-     * await() is still effectively a NOP but the above mi_switch() code
-     * is triggered as a safety.
-     */
-    if (rval == 0)
-        p->p_asleep.as_priority = 0;
-
-    PICKUP_GIANT();
-    return (rval);
-}
-
 /*
- * Implement timeout for msleep or asleep()/await()
+ * Implement timeout for msleep()
  *
  * If process hasn't been awakened (wchan non-zero),
  * set timeout flag and undo the sleep. If proc
@@ -45,8 +45,7 @@
 #define M_WAITOK        0x0000
 #define M_NOWAIT        0x0001          /* do not block */
 #define M_USE_RESERVE   0x0002          /* can alloc out of reserve memory */
-#define M_ASLEEP        0x0004          /* async sleep on failure */
-#define M_ZERO          0x0008          /* bzero the allocation */
+#define M_ZERO          0x0004          /* bzero the allocation */
 
 #define M_MAGIC         877983977       /* time when first defined :-) */
 
@@ -262,8 +262,6 @@ extern watchdog_tickle_fn wdog_tickler;
 int     msleep __P((void *chan, struct mtx *mtx, int pri, const char *wmesg,
             int timo));
 #define tsleep(chan, pri, wmesg, timo)  msleep(chan, NULL, pri, wmesg, timo)
-int     asleep __P((void *chan, int pri, const char *wmesg, int timo));
-int     await __P((int pri, int timo));
 void    wakeup __P((void *chan));
 void    wakeup_one __P((void *chan));
 
@@ -987,32 +987,6 @@ vm_wait(void)
     splx(s);
 }
 
-/*
- * vm_await: (also see VM_AWAIT macro)
- *
- * asleep on an event that will signal when free pages are available
- * for allocation.
- */
-
-void
-vm_await(void)
-{
-    int s;
-
-    s = splvm();
-    if (curproc == pageproc) {
-        vm_pageout_pages_needed = 1;
-        asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
-    } else {
-        if (!vm_pages_needed) {
-            vm_pages_needed++;
-            wakeup(&vm_pages_needed);
-        }
-        asleep(&cnt.v_free_count, PVM, "vmwait", 0);
-    }
-    splx(s);
-}
-
 /*
  * vm_page_activate:
  *
@@ -100,9 +100,7 @@ extern int vm_pageout_deficit;
 
 extern void pagedaemon_wakeup __P((void));
 #define VM_WAIT vm_wait()
-#define VM_AWAIT vm_await()
 extern void vm_wait __P((void));
-extern void vm_await __P((void));
 
 #ifdef _KERNEL
 void vm_pageout_page __P((vm_page_t, vm_object_t));