mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-11 09:50:12 +00:00
Change the preemption code for software interrupt thread schedules and
mutex releases to not require flags for the cases when preemption is not allowed: The purpose of the MTX_NOSWITCH and SWI_NOSWITCH flags is to prevent switching to a higher priority thread on mutex release and swi schedule, respectively, when that switch is not safe. Now that the critical section API maintains a per-thread nesting count, the kernel can easily check whether or not it should switch without relying on flags from the programmer. This fixes a few bugs in that all current callers of swi_sched() used SWI_NOSWITCH, when in fact, only the ones called from fast interrupt handlers and the swi_sched of softclock needed this flag. Note that to ensure that swi_sched()'s in clock and fast interrupt handlers do not switch, these handlers have to be explicitly wrapped in critical_enter/exit pairs. Presently, just wrapping the handlers is sufficient, but in the future with the fully preemptive kernel, the interrupt must be EOI'd before critical_exit() is called. (critical_exit() can switch due to a deferred preemption in a fully preemptive kernel.) I've tested the changes to the interrupt code on i386 and alpha. I have not tested ia64, but the interrupt code is almost identical to the alpha code, so I expect it will work fine. PowerPC and ARM do not yet have interrupt code in the tree so they shouldn't be broken. Sparc64 is broken, but that's been ok'd by jake and tmm who will be fixing the interrupt code for sparc64 shortly. Reviewed by: peter Tested on: i386, alpha
This commit is contained in:
parent
422f61655f
commit
c86b6ff551
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=88900
@ -697,7 +697,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
|
||||
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
|
||||
map, links);
|
||||
busdma_swi_pending = 1;
|
||||
swi_sched(vm_ih, SWI_NOSWITCH);
|
||||
swi_sched(vm_ih, 0);
|
||||
}
|
||||
}
|
||||
mtx_unlock(&bounce_lock);
|
||||
|
@ -437,7 +437,9 @@ alpha_dispatch_intr(void *frame, unsigned long vector)
|
||||
*/
|
||||
ih = TAILQ_FIRST(&ithd->it_handlers);
|
||||
if ((ih->ih_flags & IH_FAST) != 0) {
|
||||
critical_enter();
|
||||
ih->ih_handler(ih->ih_argument);
|
||||
critical_exit();
|
||||
return;
|
||||
}
|
||||
|
||||
@ -461,6 +463,7 @@ alpha_clock_interrupt(struct trapframe *framep)
|
||||
intrcnt[INTRCNT_CLOCK]++;
|
||||
#endif
|
||||
if (platform.clockintr) {
|
||||
critical_enter();
|
||||
#ifdef SMP
|
||||
/*
|
||||
* Only one processor drives the actual timer.
|
||||
@ -481,5 +484,6 @@ alpha_clock_interrupt(struct trapframe *framep)
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
#endif
|
||||
critical_exit();
|
||||
}
|
||||
}
|
||||
|
@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
|
||||
movl $KPSEL,%eax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
|
||||
lock ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -645,7 +645,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
|
||||
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
|
||||
map, links);
|
||||
busdma_swi_pending = 1;
|
||||
swi_sched(vm_ih, SWI_NOSWITCH);
|
||||
swi_sched(vm_ih, 0);
|
||||
}
|
||||
}
|
||||
splx(s);
|
||||
|
@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -4762,13 +4762,13 @@ xpt_done(union ccb *done_ccb)
|
||||
TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
|
||||
sim_links.tqe);
|
||||
done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
|
||||
swi_sched(cambio_ih, SWI_NOSWITCH);
|
||||
swi_sched(cambio_ih, 0);
|
||||
break;
|
||||
case CAM_PERIPH_NET:
|
||||
TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
|
||||
sim_links.tqe);
|
||||
done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
|
||||
swi_sched(camnet_ih, SWI_NOSWITCH);
|
||||
swi_sched(camnet_ih, 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ static void *taskqueue_acpi_ih;
|
||||
static void
|
||||
taskqueue_acpi_enqueue(void *context)
|
||||
{
|
||||
swi_sched(taskqueue_acpi_ih, SWI_NOSWITCH);
|
||||
swi_sched(taskqueue_acpi_ih, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1181,7 +1181,7 @@ siointr(unit)
|
||||
#ifndef SOFT_HOTCHAR
|
||||
if (line_status & CD1400_RDSR_SPECIAL
|
||||
&& com->hotchar != 0)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
#endif
|
||||
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
|
||||
@ -1209,7 +1209,7 @@ siointr(unit)
|
||||
++com->bytes_in;
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
@ -1259,7 +1259,7 @@ siointr(unit)
|
||||
if (com->hotchar != 0
|
||||
&& recv_data
|
||||
== com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1274,7 +1274,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
} while (--count != 0);
|
||||
} else {
|
||||
@ -1299,7 +1299,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1364,7 +1364,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
|
||||
#ifdef SOFT_CTS_OFLOW
|
||||
@ -1495,7 +1495,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
break;
|
||||
case ETC_BREAK_ENDED:
|
||||
@ -1509,7 +1509,7 @@ siointr(unit)
|
||||
if (!(com->extra_state & CSE_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->extra_state |= CSE_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
cd_outb(iobase, CD1400_SRER, cy_align,
|
||||
com->intr_enable
|
||||
@ -1567,7 +1567,7 @@ siointr(unit)
|
||||
com->state |= CS_ODONE;
|
||||
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1587,7 +1587,7 @@ siointr(unit)
|
||||
/* ensure an edge for the next interrupt */
|
||||
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
|
||||
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
COM_UNLOCK();
|
||||
}
|
||||
|
@ -1181,7 +1181,7 @@ siointr(unit)
|
||||
#ifndef SOFT_HOTCHAR
|
||||
if (line_status & CD1400_RDSR_SPECIAL
|
||||
&& com->hotchar != 0)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
#endif
|
||||
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
|
||||
@ -1209,7 +1209,7 @@ siointr(unit)
|
||||
++com->bytes_in;
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
@ -1259,7 +1259,7 @@ siointr(unit)
|
||||
if (com->hotchar != 0
|
||||
&& recv_data
|
||||
== com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1274,7 +1274,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
} while (--count != 0);
|
||||
} else {
|
||||
@ -1299,7 +1299,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1364,7 +1364,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
|
||||
#ifdef SOFT_CTS_OFLOW
|
||||
@ -1495,7 +1495,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
break;
|
||||
case ETC_BREAK_ENDED:
|
||||
@ -1509,7 +1509,7 @@ siointr(unit)
|
||||
if (!(com->extra_state & CSE_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->extra_state |= CSE_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
cd_outb(iobase, CD1400_SRER, cy_align,
|
||||
com->intr_enable
|
||||
@ -1567,7 +1567,7 @@ siointr(unit)
|
||||
com->state |= CS_ODONE;
|
||||
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1587,7 +1587,7 @@ siointr(unit)
|
||||
/* ensure an edge for the next interrupt */
|
||||
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
|
||||
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
COM_UNLOCK();
|
||||
}
|
||||
|
@ -362,7 +362,7 @@ rcintr(unit)
|
||||
optr++;
|
||||
rc_scheduled_event++;
|
||||
if (val != 0 && val == rc->rc_hotchar)
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
} else {
|
||||
/* Store also status data */
|
||||
@ -393,7 +393,7 @@ rcintr(unit)
|
||||
&& (rc->rc_tp->t_iflag & INPCK))))
|
||||
val = 0;
|
||||
else if (val != 0 && val == rc->rc_hotchar)
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
optr[0] = val;
|
||||
optr[INPUT_FLAGS_SHIFT] = iack;
|
||||
optr++;
|
||||
@ -440,7 +440,7 @@ rcintr(unit)
|
||||
if ((iack & MCR_CDchg) && !(rc->rc_flags & RC_MODCHG)) {
|
||||
rc_scheduled_event += LOTS_OF_EVENTS;
|
||||
rc->rc_flags |= RC_MODCHG;
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
goto more_intrs;
|
||||
}
|
||||
@ -481,7 +481,7 @@ rcintr(unit)
|
||||
if (!(rc->rc_flags & RC_DOXXFER)) {
|
||||
rc_scheduled_event += LOTS_OF_EVENTS;
|
||||
rc->rc_flags |= RC_DOXXFER;
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1748,7 +1748,7 @@ siointr1(com)
|
||||
}
|
||||
++com->bytes_in;
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
|
||||
@ -1759,7 +1759,7 @@ siointr1(com)
|
||||
swi_sched(sio_slow_ih, SWI_DELAY);
|
||||
#if 0 /* for testing input latency vs efficiency */
|
||||
if (com->iptr - com->ibuf == 8)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = line_status;
|
||||
@ -1797,7 +1797,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
|
||||
/* handle CTS change immediately for crisp flow ctl */
|
||||
@ -1852,7 +1852,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
}
|
||||
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
|
||||
|
@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
|
||||
movl $KPSEL,%eax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
|
||||
lock ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -645,7 +645,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
|
||||
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
|
||||
map, links);
|
||||
busdma_swi_pending = 1;
|
||||
swi_sched(vm_ih, SWI_NOSWITCH);
|
||||
swi_sched(vm_ih, 0);
|
||||
}
|
||||
}
|
||||
splx(s);
|
||||
|
@ -46,6 +46,7 @@ IDTVEC(vec_name) ; \
|
||||
movl $KPSEL,%eax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -58,6 +59,7 @@ IDTVEC(vec_name) ; \
|
||||
lock ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -1181,7 +1181,7 @@ siointr(unit)
|
||||
#ifndef SOFT_HOTCHAR
|
||||
if (line_status & CD1400_RDSR_SPECIAL
|
||||
&& com->hotchar != 0)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
#endif
|
||||
#if 1 /* XXX "intelligent" PFO error handling would break O error handling */
|
||||
@ -1209,7 +1209,7 @@ siointr(unit)
|
||||
++com->bytes_in;
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
@ -1259,7 +1259,7 @@ siointr(unit)
|
||||
if (com->hotchar != 0
|
||||
&& recv_data
|
||||
== com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1274,7 +1274,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
} while (--count != 0);
|
||||
} else {
|
||||
@ -1299,7 +1299,7 @@ siointr(unit)
|
||||
#ifdef SOFT_HOTCHAR
|
||||
if (com->hotchar != 0
|
||||
&& recv_data == com->hotchar)
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = 0;
|
||||
@ -1364,7 +1364,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
|
||||
#ifdef SOFT_CTS_OFLOW
|
||||
@ -1495,7 +1495,7 @@ siointr(unit)
|
||||
if (!(com->state & CS_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
break;
|
||||
case ETC_BREAK_ENDED:
|
||||
@ -1509,7 +1509,7 @@ siointr(unit)
|
||||
if (!(com->extra_state & CSE_ODONE)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->extra_state |= CSE_ODONE;
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
cd_outb(iobase, CD1400_SRER, cy_align,
|
||||
com->intr_enable
|
||||
@ -1567,7 +1567,7 @@ siointr(unit)
|
||||
com->state |= CS_ODONE;
|
||||
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1587,7 +1587,7 @@ siointr(unit)
|
||||
/* ensure an edge for the next interrupt */
|
||||
cy_outb(cy_iobase, CY_CLEAR_INTR, cy_align, 0);
|
||||
|
||||
swi_sched(sio_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_ih, 0);
|
||||
|
||||
COM_UNLOCK();
|
||||
}
|
||||
|
@ -61,6 +61,7 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
call critical_enter ; \
|
||||
movl PCPU(CURTHREAD),%ebx ; \
|
||||
incl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
@ -71,6 +72,7 @@ IDTVEC(vec_name) ; \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl TD_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
call critical_exit ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp doreti
|
||||
|
||||
|
@ -362,7 +362,7 @@ rcintr(unit)
|
||||
optr++;
|
||||
rc_scheduled_event++;
|
||||
if (val != 0 && val == rc->rc_hotchar)
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
} else {
|
||||
/* Store also status data */
|
||||
@ -393,7 +393,7 @@ rcintr(unit)
|
||||
&& (rc->rc_tp->t_iflag & INPCK))))
|
||||
val = 0;
|
||||
else if (val != 0 && val == rc->rc_hotchar)
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
optr[0] = val;
|
||||
optr[INPUT_FLAGS_SHIFT] = iack;
|
||||
optr++;
|
||||
@ -440,7 +440,7 @@ rcintr(unit)
|
||||
if ((iack & MCR_CDchg) && !(rc->rc_flags & RC_MODCHG)) {
|
||||
rc_scheduled_event += LOTS_OF_EVENTS;
|
||||
rc->rc_flags |= RC_MODCHG;
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
goto more_intrs;
|
||||
}
|
||||
@ -481,7 +481,7 @@ rcintr(unit)
|
||||
if (!(rc->rc_flags & RC_DOXXFER)) {
|
||||
rc_scheduled_event += LOTS_OF_EVENTS;
|
||||
rc->rc_flags |= RC_DOXXFER;
|
||||
swi_sched(rc_ih, SWI_NOSWITCH);
|
||||
swi_sched(rc_ih, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -689,7 +689,7 @@ free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
|
||||
STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
|
||||
map, links);
|
||||
busdma_swi_pending = 1;
|
||||
swi_sched(vm_ih, SWI_NOSWITCH);
|
||||
swi_sched(vm_ih, 0);
|
||||
}
|
||||
}
|
||||
splx(s);
|
||||
|
@ -114,11 +114,13 @@ interrupt(u_int64_t vector, struct trapframe *framep)
|
||||
#else
|
||||
intrcnt[INTRCNT_CLOCK]++;
|
||||
#endif
|
||||
critical_enter();
|
||||
handleclock(framep);
|
||||
|
||||
/* divide hz (1024) by 8 to get stathz (128) */
|
||||
if((++schedclk2 & 0x7) == 0)
|
||||
statclock((struct clockframe *)framep);
|
||||
critical_exit();
|
||||
#ifdef SMP
|
||||
} else if (vector == mp_ipi_vector[IPI_AST]) {
|
||||
ast(framep);
|
||||
@ -317,8 +319,10 @@ ia64_dispatch_intr(void *frame, unsigned long vector)
|
||||
*/
|
||||
ih = TAILQ_FIRST(&ithd->it_handlers);
|
||||
if ((ih->ih_flags & IH_FAST) != 0) {
|
||||
critical_enter();
|
||||
ih->ih_handler(ih->ih_argument);
|
||||
ia64_send_eoi(vector);
|
||||
critical_exit();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -242,7 +242,7 @@ hardclock(frame)
|
||||
* callout_lock held; incorrect locking order.
|
||||
*/
|
||||
if (need_softclock)
|
||||
swi_sched(softclock_ih, SWI_NOSWITCH);
|
||||
swi_sched(softclock_ih, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -145,7 +145,7 @@ cv_switch_catch(struct thread *td)
|
||||
PROC_LOCK(p);
|
||||
sig = CURSIG(p); /* XXXKSE */
|
||||
mtx_lock_spin(&sched_lock);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
if (sig != 0) {
|
||||
if (td->td_wchan != NULL)
|
||||
cv_waitq_remove(td);
|
||||
@ -218,8 +218,8 @@ cv_wait(struct cv *cvp, struct mtx *mp)
|
||||
}
|
||||
CV_WAIT_VALIDATE(cvp, mp);
|
||||
|
||||
DROP_GIANT_NOSWITCH();
|
||||
mtx_unlock_flags(mp, MTX_NOSWITCH);
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
cv_waitq_add(cvp, td);
|
||||
cv_switch(td);
|
||||
@ -273,8 +273,8 @@ cv_wait_sig(struct cv *cvp, struct mtx *mp)
|
||||
}
|
||||
CV_WAIT_VALIDATE(cvp, mp);
|
||||
|
||||
DROP_GIANT_NOSWITCH();
|
||||
mtx_unlock_flags(mp, MTX_NOSWITCH);
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
cv_waitq_add(cvp, td);
|
||||
sig = cv_switch_catch(td);
|
||||
@ -339,8 +339,8 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
|
||||
}
|
||||
CV_WAIT_VALIDATE(cvp, mp);
|
||||
|
||||
DROP_GIANT_NOSWITCH();
|
||||
mtx_unlock_flags(mp, MTX_NOSWITCH);
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
cv_waitq_add(cvp, td);
|
||||
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
|
||||
@ -412,8 +412,8 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
|
||||
}
|
||||
CV_WAIT_VALIDATE(cvp, mp);
|
||||
|
||||
DROP_GIANT_NOSWITCH();
|
||||
mtx_unlock_flags(mp, MTX_NOSWITCH);
|
||||
DROP_GIANT();
|
||||
mtx_unlock(mp);
|
||||
|
||||
cv_waitq_add(cvp, td);
|
||||
callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
|
||||
|
@ -402,7 +402,7 @@ exit1(td, rv)
|
||||
PROC_LOCK(p);
|
||||
mtx_lock_spin(&sched_lock);
|
||||
while (mtx_owned(&Giant))
|
||||
mtx_unlock_flags(&Giant, MTX_NOSWITCH);
|
||||
mtx_unlock(&Giant);
|
||||
|
||||
/*
|
||||
* We have to wait until after releasing all locks before
|
||||
@ -413,7 +413,7 @@ exit1(td, rv)
|
||||
p->p_stat = SZOMB;
|
||||
|
||||
wakeup(p->p_pptr);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
|
||||
cnt.v_swtch++;
|
||||
cpu_throw();
|
||||
|
@ -381,9 +381,9 @@ ithread_schedule(struct ithd *ithread, int do_switch)
|
||||
* Set it_need to tell the thread to keep running if it is already
|
||||
* running. Then, grab sched_lock and see if we actually need to
|
||||
* put this thread on the runqueue. If so and the do_switch flag is
|
||||
* true, then switch to the ithread immediately. Otherwise, set the
|
||||
* needresched flag to guarantee that this ithread will run before any
|
||||
* userland processes.
|
||||
* true and it is safe to switch, then switch to the ithread
|
||||
* immediately. Otherwise, set the needresched flag to guarantee
|
||||
* that this ithread will run before any userland processes.
|
||||
*/
|
||||
ithread->it_need = 1;
|
||||
mtx_lock_spin(&sched_lock);
|
||||
@ -391,7 +391,8 @@ ithread_schedule(struct ithd *ithread, int do_switch)
|
||||
CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
|
||||
p->p_stat = SRUN;
|
||||
setrunqueue(td); /* XXXKSE */
|
||||
if (do_switch && curthread->td_proc->p_stat == SRUN) {
|
||||
if (do_switch && curthread->td_critnest == 1 &&
|
||||
curthread->td_proc->p_stat == SRUN) {
|
||||
if (curthread != PCPU_GET(idlethread))
|
||||
setrunqueue(curthread);
|
||||
curthread->td_proc->p_stats->p_ru.ru_nivcsw++;
|
||||
@ -458,7 +459,7 @@ swi_sched(void *cookie, int flags)
|
||||
*/
|
||||
atomic_store_rel_int(&ih->ih_need, 1);
|
||||
if (!(flags & SWI_DELAY)) {
|
||||
error = ithread_schedule(it, !cold && flags & SWI_SWITCH);
|
||||
error = ithread_schedule(it, !cold);
|
||||
KASSERT(error == 0, ("stray software interrupt"));
|
||||
}
|
||||
}
|
||||
@ -580,7 +581,7 @@ SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
|
||||
void
|
||||
legacy_setsoftnet(void)
|
||||
{
|
||||
swi_sched(net_ih, SWI_NOSWITCH);
|
||||
swi_sched(net_ih, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
|
||||
{
|
||||
|
||||
MPASS(curthread != NULL);
|
||||
KASSERT((opts & MTX_NOSWITCH) == 0,
|
||||
("MTX_NOSWITCH used at %s:%d", file, line));
|
||||
_get_sleep_lock(m, curthread, opts, file, line);
|
||||
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
|
||||
line);
|
||||
@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
|
||||
|
||||
MPASS(curthread != NULL);
|
||||
|
||||
/*
|
||||
* _mtx_trylock does not accept MTX_NOSWITCH option.
|
||||
*/
|
||||
KASSERT((opts & MTX_NOSWITCH) == 0,
|
||||
("mtx_trylock() called with invalid option flag(s) %d", opts));
|
||||
|
||||
rval = _obtain_lock(m, curthread);
|
||||
|
||||
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
|
||||
@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
|
||||
td1->td_proc->p_stat = SRUN;
|
||||
setrunqueue(td1);
|
||||
|
||||
if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
|
||||
if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
|
||||
#ifdef notyet
|
||||
if (td->td_ithd != NULL) {
|
||||
struct ithd *it = td->td_ithd;
|
||||
@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
|
||||
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
|
||||
|
||||
/* Tell witness this isn't locked to make it happy. */
|
||||
WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
|
||||
__FILE__, __LINE__);
|
||||
WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
|
||||
__LINE__);
|
||||
}
|
||||
|
||||
WITNESS_DESTROY(&m->mtx_object);
|
||||
|
@ -268,7 +268,7 @@ boot(int howto)
|
||||
pbusy = nbusy;
|
||||
sync(thread0, NULL);
|
||||
if (curthread != NULL) {
|
||||
DROP_GIANT_NOSWITCH();
|
||||
DROP_GIANT();
|
||||
for (subiter = 0; subiter < 50 * iter; subiter++) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
setrunqueue(curthread);
|
||||
|
@ -1560,8 +1560,8 @@ issignal(p)
|
||||
do {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
stop(p);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
DROP_GIANT_NOSWITCH();
|
||||
PROC_UNLOCK(p);
|
||||
DROP_GIANT();
|
||||
p->p_stats->p_ru.ru_nivcsw++;
|
||||
mi_switch();
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
@ -1639,8 +1639,8 @@ issignal(p)
|
||||
}
|
||||
mtx_lock_spin(&sched_lock);
|
||||
stop(p);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
DROP_GIANT_NOSWITCH();
|
||||
PROC_UNLOCK(p);
|
||||
DROP_GIANT();
|
||||
p->p_stats->p_ru.ru_nivcsw++;
|
||||
mi_switch();
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
|
@ -386,7 +386,7 @@ uio_yield()
|
||||
|
||||
td = curthread;
|
||||
mtx_lock_spin(&sched_lock);
|
||||
DROP_GIANT_NOSWITCH();
|
||||
DROP_GIANT();
|
||||
td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
|
||||
setrunqueue(td);
|
||||
td->td_proc->p_stats->p_ru.ru_nivcsw++;
|
||||
|
@ -437,17 +437,17 @@ msleep(ident, mtx, priority, wmesg, timo)
|
||||
* in case this is the idle process and already asleep.
|
||||
*/
|
||||
if (mtx != NULL && priority & PDROP)
|
||||
mtx_unlock_flags(mtx, MTX_NOSWITCH);
|
||||
mtx_unlock(mtx);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
return (0);
|
||||
}
|
||||
|
||||
DROP_GIANT_NOSWITCH();
|
||||
DROP_GIANT();
|
||||
|
||||
if (mtx != NULL) {
|
||||
mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
|
||||
WITNESS_SAVE(&mtx->mtx_object, mtx);
|
||||
mtx_unlock_flags(mtx, MTX_NOSWITCH);
|
||||
mtx_unlock(mtx);
|
||||
if (priority & PDROP)
|
||||
mtx = NULL;
|
||||
}
|
||||
@ -482,7 +482,7 @@ msleep(ident, mtx, priority, wmesg, timo)
|
||||
PROC_LOCK(p);
|
||||
sig = CURSIG(p);
|
||||
mtx_lock_spin(&sched_lock);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
if (sig != 0) {
|
||||
if (td->td_wchan != NULL)
|
||||
unsleep(td);
|
||||
@ -750,13 +750,13 @@ mi_switch()
|
||||
PROC_LOCK(p);
|
||||
killproc(p, "exceeded maximum CPU limit");
|
||||
mtx_lock_spin(&sched_lock);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
} else {
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
PROC_LOCK(p);
|
||||
psignal(p, SIGXCPU);
|
||||
mtx_lock_spin(&sched_lock);
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
if (rlim->rlim_cur < rlim->rlim_max) {
|
||||
/* XXX: we should make a private copy */
|
||||
rlim->rlim_cur += 5;
|
||||
|
@ -209,7 +209,7 @@ taskqueue_run(struct taskqueue *queue)
|
||||
static void
|
||||
taskqueue_swi_enqueue(void *context)
|
||||
{
|
||||
swi_sched(taskqueue_ih, SWI_NOSWITCH);
|
||||
swi_sched(taskqueue_ih, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -82,7 +82,7 @@ userret(td, frame, oticks)
|
||||
mtx_lock_spin(&sched_lock);
|
||||
kg->kg_pri.pri_level = kg->kg_pri.pri_user;
|
||||
if (ke->ke_flags & KEF_NEEDRESCHED) {
|
||||
DROP_GIANT_NOSWITCH();
|
||||
DROP_GIANT();
|
||||
setrunqueue(td);
|
||||
p->p_stats->p_ru.ru_nivcsw++;
|
||||
mi_switch();
|
||||
|
@ -209,8 +209,6 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
|
||||
{
|
||||
|
||||
MPASS(curthread != NULL);
|
||||
KASSERT((opts & MTX_NOSWITCH) == 0,
|
||||
("MTX_NOSWITCH used at %s:%d", file, line));
|
||||
_get_sleep_lock(m, curthread, opts, file, line);
|
||||
LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
|
||||
line);
|
||||
@ -264,12 +262,6 @@ _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
|
||||
|
||||
MPASS(curthread != NULL);
|
||||
|
||||
/*
|
||||
* _mtx_trylock does not accept MTX_NOSWITCH option.
|
||||
*/
|
||||
KASSERT((opts & MTX_NOSWITCH) == 0,
|
||||
("mtx_trylock() called with invalid option flag(s) %d", opts));
|
||||
|
||||
rval = _obtain_lock(m, curthread);
|
||||
|
||||
LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
|
||||
@ -524,7 +516,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
|
||||
td1->td_proc->p_stat = SRUN;
|
||||
setrunqueue(td1);
|
||||
|
||||
if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
|
||||
if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
|
||||
#ifdef notyet
|
||||
if (td->td_ithd != NULL) {
|
||||
struct ithd *it = td->td_ithd;
|
||||
@ -691,8 +683,8 @@ mtx_destroy(struct mtx *m)
|
||||
MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
|
||||
|
||||
/* Tell witness this isn't locked to make it happy. */
|
||||
WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
|
||||
__FILE__, __LINE__);
|
||||
WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
|
||||
__LINE__);
|
||||
}
|
||||
|
||||
WITNESS_DESTROY(&m->mtx_object);
|
||||
|
@ -818,7 +818,7 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
|
||||
instance->li_lock->lo_name,
|
||||
instance->li_flags);
|
||||
instance->li_flags--;
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
s = cpu_critical_enter();
|
||||
CTR4(KTR_WITNESS,
|
||||
@ -839,23 +839,11 @@ witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
|
||||
td->td_proc->p_pid, lle);
|
||||
witness_lock_list_free(lle);
|
||||
}
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
}
|
||||
panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
|
||||
file, line);
|
||||
out:
|
||||
/*
|
||||
* We don't need to protect this PCPU_GET() here against preemption
|
||||
* because if we hold any spinlocks then we are already protected,
|
||||
* and if we don't we will get NULL if we hold no spinlocks even if
|
||||
* we switch CPU's while reading it.
|
||||
*/
|
||||
if (class->lc_flags & LC_SLEEPLOCK) {
|
||||
if ((flags & LOP_NOSWITCH) == 0 && PCPU_GET(spinlocks) != NULL)
|
||||
panic("switchable sleep unlock (%s) %s @ %s:%d",
|
||||
class->lc_name, lock->lo_name, file, line);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2700,7 +2700,7 @@ status_read:;
|
||||
}
|
||||
++com->bytes_in;
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
|
||||
@ -2711,7 +2711,7 @@ status_read:;
|
||||
swi_sched(sio_slow_ih, SWI_DELAY);
|
||||
#if 0 /* for testing input latency vs efficiency */
|
||||
if (com->iptr - com->ibuf == 8)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = line_status;
|
||||
@ -2765,7 +2765,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
|
||||
/* handle CTS change immediately for crisp flow ctl */
|
||||
@ -2868,7 +2868,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
}
|
||||
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
|
||||
|
@ -2700,7 +2700,7 @@ status_read:;
|
||||
}
|
||||
++com->bytes_in;
|
||||
if (com->hotchar != 0 && recv_data == com->hotchar)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
ioptr = com->iptr;
|
||||
if (ioptr >= com->ibufend)
|
||||
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
|
||||
@ -2711,7 +2711,7 @@ status_read:;
|
||||
swi_sched(sio_slow_ih, SWI_DELAY);
|
||||
#if 0 /* for testing input latency vs efficiency */
|
||||
if (com->iptr - com->ibuf == 8)
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
#endif
|
||||
ioptr[0] = recv_data;
|
||||
ioptr[com->ierroff] = line_status;
|
||||
@ -2765,7 +2765,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
if (!(com->state & CS_CHECKMSR)) {
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_CHECKMSR;
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
|
||||
/* handle CTS change immediately for crisp flow ctl */
|
||||
@ -2868,7 +2868,7 @@ if (com->iptr - com->ibuf == 8)
|
||||
com_events += LOTS_OF_EVENTS;
|
||||
com->state |= CS_ODONE;
|
||||
/* handle at high level ASAP */
|
||||
swi_sched(sio_fast_ih, SWI_NOSWITCH);
|
||||
swi_sched(sio_fast_ih, 0);
|
||||
}
|
||||
}
|
||||
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
|
||||
|
@ -82,9 +82,7 @@ struct ithd {
|
||||
#define IT_DEAD 0x000004 /* Thread is waiting to exit. */
|
||||
|
||||
/* Flags to pass to sched_swi. */
|
||||
#define SWI_NOSWITCH 0x0
|
||||
#define SWI_SWITCH 0x1
|
||||
#define SWI_DELAY 0x2 /* implies NOSWITCH */
|
||||
#define SWI_DELAY 0x2
|
||||
|
||||
/*
|
||||
* Software interrupt bit numbers in priority order. The priority only
|
||||
|
@ -78,7 +78,6 @@ struct lock_class {
|
||||
* Option flags passed to lock operations that witness also needs to know
|
||||
* about or that are generic across all locks.
|
||||
*/
|
||||
#define LOP_NOSWITCH 0x00000001 /* Lock doesn't switch on release. */
|
||||
#define LOP_QUIET 0x00000002 /* Don't log locking operations. */
|
||||
#define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */
|
||||
#define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */
|
||||
|
@ -62,7 +62,6 @@
|
||||
* Option flags passed to certain lock/unlock routines, through the use
|
||||
* of corresponding mtx_{lock,unlock}_flags() interface macros.
|
||||
*/
|
||||
#define MTX_NOSWITCH LOP_NOSWITCH /* Do not switch on release */
|
||||
#define MTX_QUIET LOP_QUIET /* Don't log a mutex event */
|
||||
|
||||
/*
|
||||
@ -214,7 +213,7 @@ void mtx_unlock_giant(int s);
|
||||
* mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
|
||||
* and passes option flags `opts' to the "hard" function, if required.
|
||||
* With these routines, it is possible to pass flags such as MTX_QUIET
|
||||
* and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
|
||||
* to the appropriate lock manipulation routines.
|
||||
*
|
||||
* mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
|
||||
* it cannot. Rather, it returns 0 on failure and non-zero on success.
|
||||
@ -294,16 +293,6 @@ extern int kern_giant_file;
|
||||
*
|
||||
* Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
|
||||
*/
|
||||
#define DROP_GIANT_NOSWITCH() \
|
||||
do { \
|
||||
int _giantcnt; \
|
||||
WITNESS_SAVE_DECL(Giant); \
|
||||
\
|
||||
if (mtx_owned(&Giant)) \
|
||||
WITNESS_SAVE(&Giant.mtx_object, Giant); \
|
||||
for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
|
||||
mtx_unlock_flags(&Giant, MTX_NOSWITCH)
|
||||
|
||||
#define DROP_GIANT() \
|
||||
do { \
|
||||
int _giantcnt; \
|
||||
|
@ -582,8 +582,6 @@ sigonstack(size_t sp)
|
||||
#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx)
|
||||
#define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx)
|
||||
#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx)
|
||||
#define PROC_UNLOCK_NOSWITCH(p) \
|
||||
mtx_unlock_flags(&(p)->p_mtx, MTX_NOSWITCH)
|
||||
#define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx)
|
||||
#define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type))
|
||||
|
||||
|
@ -630,7 +630,7 @@ swapout(p)
|
||||
mtx_lock_spin(&sched_lock);
|
||||
p->p_sflag &= ~PS_INMEM;
|
||||
p->p_sflag |= PS_SWAPPING;
|
||||
PROC_UNLOCK_NOSWITCH(p);
|
||||
PROC_UNLOCK(p);
|
||||
FOREACH_THREAD_IN_PROC (p, td)
|
||||
if (td->td_proc->p_stat == SRUN) /* XXXKSE */
|
||||
remrunqueue(td); /* XXXKSE */
|
||||
|
Loading…
Reference in New Issue
Block a user