/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __MACHINE_INTR_MACHDEP_H__
#define __MACHINE_INTR_MACHDEP_H__

#ifdef _KERNEL

/*
 * The maximum number of I/O interrupts we allow. This number is rather
 * arbitrary as it is just the maximum IRQ resource value. The interrupt
 * source for a given IRQ maps that I/O interrupt to a device interrupt
 * source, whether it be a pin on an interrupt controller or an MSI interrupt.
 * The 16 ISA IRQs are assigned fixed IDT vectors, but all other device
 * interrupts allocate IDT vectors on demand. Currently we have 191 IDT
 * vectors available for device interrupts. On many systems with I/O APICs,
 * a lot of the IRQs are not used, so this number can be much larger than
 * 191 and still be safe since only interrupt sources in actual use will
 * allocate IDT vectors.
 *
 * For now we stick with 255, as ISA IRQs and PCI intline IRQs only allow
 * for IRQs in the range 0 - 254. When MSI support is added, this number
 * will likely increase.
 */
#define NUM_IO_INTS 255

/*
 * - 1 ??? dummy counter.
 * - 2 counters for each I/O interrupt.
 * - 1 counter for each CPU for lapic timer.
 * - 7 counters for each CPU for IPI counters for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + (1 + 7) * MAXCPU)
#else
#define INTRCNT_COUNT (1 + NUM_IO_INTS * 2 + 1)
#endif
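/*
 * As a worked example (assuming a MAXCPU of 16 purely for illustration,
 * since the real value comes from the kernel configuration), the SMP case
 * yields 1 + 255 * 2 + (1 + 7) * 16 = 639 counters, while the UP case
 * yields 1 + 255 * 2 + 1 = 512 counters.
 */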

#ifndef LOCORE

typedef void inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);

#define IDTVEC(name) __CONCAT(X,name)
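/*
 * The low-level interrupt entry points written in assembly are declared
 * through this macro; for example, a hypothetical vector named "foo" would
 * be declared as
 *
 *	inthand_t IDTVEC(foo);
 *
 * which expands to a declaration of the symbol Xfoo.
 */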

struct intsrc;

/*
 * Methods that a PIC provides to mask/unmask a given interrupt source,
 * "turn on" the interrupt on the CPU side by setting up an IDT entry, and
 * return the vector associated with this source.
 */
struct pic {
	void (*pic_enable_source)(struct intsrc *);
	void (*pic_disable_source)(struct intsrc *, int);
	void (*pic_eoi_source)(struct intsrc *);
	void (*pic_enable_intr)(struct intsrc *);
	int (*pic_vector)(struct intsrc *);
	int (*pic_source_pending)(struct intsrc *);
	void (*pic_suspend)(struct pic *);
	void (*pic_resume)(struct pic *);
	int (*pic_config_intr)(struct intsrc *, enum intr_trigger,
	    enum intr_polarity);
	void (*pic_assign_cpu)(struct intsrc *, u_int apic_id);
	STAILQ_ENTRY(pic) pics;
};
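/*
 * A minimal sketch (with hypothetical "foo" names) of how a PIC driver
 * might fill in its method table and hand it to the common interrupt code:
 *
 *	static struct pic foo_pic = {
 *		.pic_enable_source = foo_enable_source,
 *		.pic_disable_source = foo_disable_source,
 *		.pic_eoi_source = foo_eoi_source,
 *		.pic_enable_intr = foo_enable_intr,
 *		.pic_vector = foo_vector,
 *		.pic_source_pending = foo_source_pending,
 *		.pic_suspend = foo_suspend,
 *		.pic_resume = foo_resume,
 *		.pic_config_intr = foo_config_intr,
 *		.pic_assign_cpu = foo_assign_cpu,
 *	};
 *
 *	intr_register_pic(&foo_pic);
 */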

/* Flags for pic_disable_source() */
enum {
	PIC_EOI,
	PIC_NO_EOI,
};

/*
 * An interrupt source. The upper-layer code uses the PIC methods to
 * control a given source. The lower-layer PIC drivers can store additional
 * private data in a given interrupt source such as an interrupt pin number
 * or an I/O APIC pointer.
 */
struct intsrc {
	struct pic *is_pic;
	struct intr_event *is_event;
	u_long *is_count;
	u_long *is_straycount;
	u_int is_index;
	u_int is_enabled:1;
};
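/*
 * A lower-layer PIC driver typically embeds struct intsrc in a larger,
 * driver-private structure, commonly as the first member so that the
 * struct intsrc pointer passed to the PIC methods can be cast back to the
 * private type. A hypothetical sketch:
 *
 *	struct foo_intsrc {
 *		struct intsrc fi_intsrc;
 *		u_int fi_pin;
 *	};
 *
 * After pointing is_pic at the driver's struct pic, each source is handed
 * to the common code with intr_register_source().
 */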

struct trapframe;

extern struct mtx icu_lock;
extern int elcr_found;

/* XXX: The elcr_* prototypes probably belong somewhere else. */
int elcr_probe(void);
enum intr_trigger elcr_read_trigger(u_int irq);
void elcr_resume(void);
void elcr_write_trigger(u_int irq, enum intr_trigger trigger);
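/*
 * The ELCR is the edge/level control register used to program the trigger
 * mode of ISA IRQs. For illustration, a caller that wants a level-triggered
 * IRQ might do something like the following once elcr_probe() has succeeded:
 *
 *	if (elcr_found && elcr_read_trigger(irq) != INTR_TRIGGER_LEVEL)
 *		elcr_write_trigger(irq, INTR_TRIGGER_LEVEL);
 */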
#ifdef SMP
void intr_add_cpu(u_int apic_id);
#else
#define intr_add_cpu(apic_id)
#endif
int intr_add_handler(const char *name, int vector, driver_intr_t handler,
    void *arg, enum intr_type flags, void **cookiep);
int intr_config_intr(int vector, enum intr_trigger trig,
    enum intr_polarity pol);
void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame);
struct intsrc *intr_lookup_source(int vector);
int intr_register_pic(struct pic *pic);
int intr_register_source(struct intsrc *isrc);
int intr_remove_handler(void *cookie);
void intr_resume(void);
void intr_suspend(void);
void intrcnt_add(const char *name, u_long **countp);
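/*
 * For illustration, a nexus-style bus driver might hook a device's handler
 * up to an IRQ roughly as follows (hypothetical handler, resource, and softc
 * names; error handling omitted):
 *
 *	void *cookie;
 *
 *	error = intr_add_handler(device_get_nameunit(child),
 *	    rman_get_start(irq_res), foo_intr, sc,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, &cookie);
 *
 * The cookie is later passed to intr_remove_handler() to tear the handler
 * back down.
 */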

#endif /* !LOCORE */
#endif /* _KERNEL */
#endif /* !__MACHINE_INTR_MACHDEP_H__ */