/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz and Don Ahn.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)clock.c 7.2 (Berkeley) 5/12/91
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Routines to handle clock hardware.
*/
/*
* inittodr, settodr and support routines written
* by Christoph Robitschko <chmr@edvz.tu-graz.ac.at>
*
* reintroduced and updated by Chris Stenton <chris@gnome.co.uk> 8/10/94
*/
#include "opt_apic.h"
#include "opt_clock.h"
#include "opt_isa.h"
#include "opt_mca.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/cons.h>
#include <sys/power.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif
#include <machine/specialreg.h>
#include <machine/ppireg.h>
#include <machine/timerreg.h>
#include <isa/rtc.h>
#ifdef DEV_ISA
#include <isa/isareg.h>
#include <isa/isavar.h>
#endif
#ifdef DEV_MCA
#include <i386/bios/mca_machdep.h>
#endif
/*
* 32-bit time_t's can't reach leap years before 1904 or after 2036, so we
* can use a simple formula for leap years.
*/
#define LEAPYEAR(y) (((u_int)(y) % 4 == 0) ? 1 : 0)
#define DAYSPERYEAR (31+28+31+30+31+30+31+31+30+31+30+31)
#define TIMER_DIV(x) ((timer_freq + (x) / 2) / (x))
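/*
 * For example, with the default 1193182 Hz input clock and hz = 100,
 * TIMER_DIV(100) = (1193182 + 50) / 100 = 11932; the "+ (x) / 2"
 * term rounds to the nearest count instead of truncating.
 */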
int adjkerntz; /* local offset from GMT in seconds */
int clkintr_pending;
int disable_rtc_set; /* disable resettodr() if != 0 */
int pscnt = 1;
int psdiv = 1;
int statclock_disable;
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
u_int timer_freq = TIMER_FREQ;
int timer0_max_count;
int wall_cmos_clock; /* wall CMOS clock assumed if != 0 */
struct mtx clock_lock;
#define RTC_LOCK mtx_lock_spin(&clock_lock)
#define RTC_UNLOCK mtx_unlock_spin(&clock_lock)
static int beeping = 0;
static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static u_int hardclock_max_count;
static struct intsrc *i8254_intsrc;
static u_int32_t i8254_lastcount;
static u_int32_t i8254_offset;
static int (*i8254_pending)(struct intsrc *);
static int i8254_ticked;
static int using_lapic_timer;
static u_char rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
static u_char rtc_statusb = RTCSB_24HR;
/* Values for timerX_state: */
#define RELEASED 0
#define RELEASE_PENDING 1
#define ACQUIRED 2
#define ACQUIRE_PENDING 3
static u_char timer2_state;
static unsigned i8254_get_timecount(struct timecounter *tc);
static void set_timer_freq(u_int freq, int intr_freq);
static struct timecounter i8254_timecounter = {
i8254_get_timecount, /* get_timecount */
0, /* no poll_pps */
~0u, /* counter_mask */
0, /* frequency */
"i8254", /* name */
0 /* quality */
};
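/*
 * The i8254 count is only 16 bits wide and counts down, so
 * i8254_get_timecount() extends it: i8254_offset accumulates one
 * timer0_max_count per counter rollover (see clkintr() below), and
 * the inverted hardware count is added on top of that.
 */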
static void
clkintr(struct clockframe *frame)
{
if (timecounter->tc_get_timecount == i8254_get_timecount) {
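/*
 * When the i8254 is the active timecounter, account for this tick
 * in i8254_offset, unless i8254_get_timecount() already noticed
 * the pending interrupt and accounted for it (i8254_ticked).
 */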
mtx_lock_spin(&clock_lock);
if (i8254_ticked)
i8254_ticked = 0;
else {
i8254_offset += timer0_max_count;
i8254_lastcount = 0;
}
clkintr_pending = 0;
mtx_unlock_spin(&clock_lock);
}
if (!using_lapic_timer)
hardclock(frame);
#ifdef DEV_MCA
/* Reset clock interrupt by asserting bit 7 of port 0x61 */
if (MCA_system)
outb(0x61, inb(0x61) | 0x80);
#endif
}
int
acquire_timer2(int mode)
{
if (timer2_state != RELEASED)
return (-1);
timer2_state = ACQUIRED;
/*
* This access to the timer registers is as atomic as possible
* because it is a single instruction. We could do better if we
* knew the rate. Use of splclock() limits glitches to 10-100us,
* and this is probably good enough for timer2, so we aren't as
* careful with it as with timer0.
*/
outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f));
return (0);
}
int
release_timer2(void)
{
if (timer2_state != ACQUIRED)
return (-1);
timer2_state = RELEASED;
outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
return (0);
}
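/*
 * For example, a driver that wants the speaker gate clock would call
 * acquire_timer2(TIMER_SQWAVE | TIMER_16BIT), program TIMER_CNTR2,
 * and later call release_timer2() to restore the default mode.
 */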
/*
* This routine receives statistical clock interrupts from the RTC.
* These occur at 128 interrupts per second.
* When profiling, we receive interrupts at a rate of 1024 Hz.
*
* This does not actually add as much overhead as it sounds, because
* when the statistical clock is active, the hardclock driver no longer
* needs to keep (inaccurate) statistics on its own. This decouples
* statistics gathering from scheduling interrupts.
*
* The RTC chip requires that we read status register C (RTC_INTR)
* to acknowledge an interrupt, before it will generate the next one.
* Under high interrupt load, rtcintr() can be indefinitely delayed and
* the clock can tick immediately after the read from RTC_INTR. In this
* case, the mc146818A interrupt signal will not drop for long enough
* to register with the 8259 PIC. If an interrupt is missed, the stat
* clock will halt, considerably degrading system performance. This is
* why we use 'while' rather than a more straightforward 'if' below.
* Stat clock ticks can still be lost, causing minor loss of accuracy
* in the statistics, but the stat clock will no longer stop.
*/
static void
rtcintr(struct clockframe *frame)
{
while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
if (profprocs != 0) {
if (--pscnt == 0)
pscnt = psdiv;
profclock(frame);
}
if (pscnt == psdiv)
statclock(frame);
}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
DB_SHOW_COMMAND(rtc, rtc)
{
printf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n",
rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY),
rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC),
rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR));
}
#endif /* DDB */
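/*
 * Latch and read the current timer 0 count. The latch command
 * freezes the two-byte count so the low/high reads are coherent.
 */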
static int
getit(void)
{
int high, low;
mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
mtx_unlock_spin(&clock_lock);
return ((high << 8) | low);
}
/*
* Wait "n" microseconds.
* Relies on timer 0 counting down from (timer_freq / hz)
* Note: timer had better have been programmed before this is first used!
*/
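/*
 * For example, DELAY(1000) busy-waits roughly one millisecond by
 * polling the i8254 down-counter; it needs no working interrupts,
 * which is why it is safe for early console i/o and inside kdb.
 */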
void
DELAY(int n)
{
int delta, prev_tick, tick, ticks_left;
#ifdef DELAYDEBUG
int getit_calls = 1;
int n1;
static int state = 0;
if (state == 0) {
state = 1;
for (n1 = 1; n1 <= 10000000; n1 *= 10)
DELAY(n1);
state = 2;
}
if (state == 1)
printf("DELAY(%d)...", n);
#endif
/*
* Guard against the timer being uninitialized if we are called
* early for console i/o.
*/
if (timer0_max_count == 0)
set_timer_freq(timer_freq, hz);
/*
* Read the counter first, so that the rest of the setup overhead is
* counted. Guess the initial overhead is 20 usec (on most systems it
* takes about 1.5 usec for each of the i/o's in getit(). The loop
* takes about 6 usec on a 486/33 and 13 usec on a 386/20. The
* multiplications and divisions to scale the count take a while).
*
* However, if ddb is active then use a fake counter since reading
* the i8254 counter involves acquiring a lock. ddb must not do
* locking for many reasons, but it calls here for at least atkbd
* input.
*/
#ifdef KDB
if (kdb_active)
prev_tick = 1;
else
#endif
prev_tick = getit();
n -= 0; /* XXX actually guess no initial overhead */
/*
* Calculate (n * (timer_freq / 1e6)) without using floating point
* and without any avoidable overflows.
*/
if (n <= 0)
ticks_left = 0;
else if (n < 256)
/*
* Use fixed point to avoid a slow division by 1000000.
* 39099 = 1193182 * 2^15 / 10^6 rounded to nearest.
* 2^15 is the first power of 2 that gives exact results
* for n between 0 and 256.
*/
ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15;
else
/*
* Don't bother using fixed point, although gcc-2.7.2
* generates particularly poor code for the long long
* division, since even the slow way will complete long
* before the delay is up (unless we're interrupted).
*/
ticks_left = ((u_int)n * (long long)timer_freq + 999999)
/ 1000000;
while (ticks_left > 0) {
#ifdef KDB
if (kdb_active) {
inb(0x84);
tick = prev_tick - 1;
if (tick <= 0)
tick = timer0_max_count;
} else
#endif
tick = getit();
#ifdef DELAYDEBUG
++getit_calls;
#endif
delta = prev_tick - tick;
prev_tick = tick;
if (delta < 0) {
delta += timer0_max_count;
/*
* Guard against timer0_max_count being wrong.
* This shouldn't happen in normal operation,
* but it may happen if set_timer_freq() is
* traced.
*/
if (delta < 0)
delta = 0;
}
ticks_left -= delta;
}
#ifdef DELAYDEBUG
if (state == 1)
printf(" %d calls to getit() at %d usec each\n",
getit_calls, (n + 5) / getit_calls);
#endif
}
static void
sysbeepstop(void *chan)
{
ppi_spkr_off(); /* disable counter2 output to speaker */
timer_spkr_release();
beeping = 0;
}
int
sysbeep(int pitch, int period)
{
int x = splclock();
if (timer_spkr_acquire())
if (!beeping) {
/* Something else owns it. */
splx(x);
return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
}
mtx_lock_spin(&clock_lock);
spkr_set_pitch(pitch);
mtx_unlock_spin(&clock_lock);
if (!beeping) {
/* enable counter2 output to speaker */
ppi_spkr_on();
beeping = period;
timeout(sysbeepstop, (void *)NULL, period);
}
splx(x);
return (0);
}
/*
* RTC support routines
*/
int
rtcin(int reg)
{
u_char val;
RTC_LOCK;
outb(IO_RTC, reg);
inb(0x84); /* dummy read from an unused port, used only as an I/O delay */
val = inb(IO_RTC + 1);
inb(0x84);
RTC_UNLOCK;
return (val);
}
static __inline void
writertc(u_char reg, u_char val)
{
RTC_LOCK;
inb(0x84);
outb(IO_RTC, reg);
inb(0x84);
outb(IO_RTC + 1, val);
inb(0x84); /* XXX work around wrong order in rtcin() */
RTC_UNLOCK;
}
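/*
 * The RTC keeps its date and time registers in BCD, so e.g. a
 * seconds register reading of 0x37 means 37 seconds and
 * bcd2bin(0x37) == 37.
 */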
static __inline int
readrtc(int port)
{
return(bcd2bin(rtcin(port)));
}
static u_int
calibrate_clocks(void)
{
u_int count, prev_count, tot_count;
int sec, start_sec, timeout;
if (bootverbose)
printf("Calibrating clock(s) ... ");
if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
goto fail;
timeout = 100000000;
/* Read the mc146818A seconds counter. */
for (;;) {
if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
sec = rtcin(RTC_SEC);
break;
}
if (--timeout == 0)
goto fail;
}
/* Wait for the mc146818A seconds counter to change. */
start_sec = sec;
for (;;) {
if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
sec = rtcin(RTC_SEC);
if (sec != start_sec)
break;
}
if (--timeout == 0)
goto fail;
}
/* Start keeping track of the i8254 counter. */
prev_count = getit();
if (prev_count == 0 || prev_count > timer0_max_count)
goto fail;
tot_count = 0;
/*
* Wait for the mc146818A seconds counter to change. Read the i8254
* counter for each iteration since this is convenient and only
* costs a few usec of inaccuracy. The timing of the final reads
* of the counters almost matches the timing of the initial reads,
* so the main cause of inaccuracy is the varying latency from
* inside getit() or rtcin(RTC_STATUSA) to the beginning of the
* rtcin(RTC_SEC) that returns a changed seconds count. The
* maximum inaccuracy from this cause is < 10 usec on 486's.
*/
start_sec = sec;
for (;;) {
if (!(rtcin(RTC_STATUSA) & RTCSA_TUP))
sec = rtcin(RTC_SEC);
count = getit();
if (count == 0 || count > timer0_max_count)
goto fail;
if (count > prev_count)
tot_count += prev_count - (count - timer0_max_count);
else
tot_count += prev_count - count;
prev_count = count;
if (sec != start_sec)
break;
if (--timeout == 0)
goto fail;
}
if (bootverbose) {
printf("i8254 clock: %u Hz\n", tot_count);
}
return (tot_count);
fail:
if (bootverbose)
printf("failed, using default i8254 clock of %u Hz\n",
timer_freq);
return (timer_freq);
}
static void
set_timer_freq(u_int freq, int intr_freq)
{
int new_timer0_max_count;
mtx_lock_spin(&clock_lock);
timer_freq = freq;
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
if (new_timer0_max_count != timer0_max_count) {
timer0_max_count = new_timer0_max_count;
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
}
mtx_unlock_spin(&clock_lock);
}
static void
i8254_restore(void)
{
mtx_lock_spin(&clock_lock);
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
outb(TIMER_CNTR0, timer0_max_count & 0xff);
outb(TIMER_CNTR0, timer0_max_count >> 8);
mtx_unlock_spin(&clock_lock);
}
static void
rtc_restore(void)
{
/* Restore all of the RTC's "status" (actually, control) registers. */
/* XXX locking is needed for RTC access. */
writertc(RTC_STATUSB, RTCSB_24HR);
writertc(RTC_STATUSA, rtc_statusa);
writertc(RTC_STATUSB, rtc_statusb);
rtcin(RTC_INTR);
}
/*
* Restore all the timers non-atomically (XXX: should be atomically).
*
* This function is called from pmtimer_resume() to restore all the timers.
* This should not be necessary, but there are broken laptops that do not
* restore all the timers on resume.
*/
void
timer_restore(void)
{
i8254_restore(); /* restore timer_freq and hz */
rtc_restore(); /* reenable RTC interrupts */
}
/*
* Initialize 8254 timer 0 early so that it can be used in DELAY().
* XXX initialization of other timers is unintentionally left blank.
*/
void
startrtclock(void)
{
u_int delta, freq;
writertc(RTC_STATUSA, rtc_statusa);
writertc(RTC_STATUSB, RTCSB_24HR);
set_timer_freq(timer_freq, hz);
freq = calibrate_clocks();
#ifdef CLK_CALIBRATION_LOOP
if (bootverbose) {
printf(
"Press a key on the console to abort clock calibration\n");
while (cncheckc() == -1)
calibrate_clocks();
}
#endif
/*
* Use the calibrated i8254 frequency if it seems reasonable.
* Otherwise use the default, and don't use the calibrated i586
* frequency.
*/
delta = freq > timer_freq ? freq - timer_freq : timer_freq - freq;
if (delta < timer_freq / 100) {
#ifndef CLK_USE_I8254_CALIBRATION
if (bootverbose)
printf(
"CLK_USE_I8254_CALIBRATION not specified - using default frequency\n");
freq = timer_freq;
#endif
timer_freq = freq;
} else {
if (bootverbose)
printf(
"%d Hz differs from default of %d Hz by more than 1%%\n",
freq, timer_freq);
}
set_timer_freq(timer_freq, hz);
i8254_timecounter.tc_frequency = timer_freq;
tc_init(&i8254_timecounter);
init_TSC();
}
/*
* Initialize the time of day register, based on the time base which is, e.g.
* from a filesystem.
*/
void
inittodr(time_t base)
{
unsigned long sec, days;
int year, month;
int y, m, s;
struct timespec ts;
if (base) {
s = splclock();
ts.tv_sec = base;
ts.tv_nsec = 0;
tc_setclock(&ts);
splx(s);
}
/* Check whether an RTC is present and its time is valid */
if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
goto wrong_time;
/* wait for time update to complete */
/* If RTCSA_TUP is zero, we have at least 244us before next update */
s = splhigh();
while (rtcin(RTC_STATUSA) & RTCSA_TUP) {
splx(s);
s = splhigh();
}
days = 0;
#ifdef USE_RTC_CENTURY
year = readrtc(RTC_YEAR) + readrtc(RTC_CENTURY) * 100;
#else
year = readrtc(RTC_YEAR) + 1900;
if (year < 1970)
year += 100;
#endif
if (year < 1970) {
splx(s);
goto wrong_time;
}
month = readrtc(RTC_MONTH);
for (m = 1; m < month; m++)
days += daysinmonth[m-1];
if ((month > 2) && LEAPYEAR(year))
days ++;
days += readrtc(RTC_DAY) - 1;
for (y = 1970; y < year; y++)
days += DAYSPERYEAR + LEAPYEAR(y);
sec = ((( days * 24 +
readrtc(RTC_HRS)) * 60 +
readrtc(RTC_MIN)) * 60 +
readrtc(RTC_SEC));
/* sec now contains the number of seconds since Jan 1 1970,
in the local time zone */
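/*
 * For example, an RTC reading of 00:00:30 on Jan 2 1970 gives
 * days = 1, so sec = ((1 * 24 + 0) * 60 + 0) * 60 + 30 = 86430.
 */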
sec += tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
y = time_second - sec;
if (y <= -2 || y >= 2) {
/* badly off, adjust it */
ts.tv_sec = sec;
ts.tv_nsec = 0;
tc_setclock(&ts);
}
splx(s);
return;
wrong_time:
printf("Invalid time in real time clock.\n");
printf("Check and reset the date immediately!\n");
}
/*
* Write system time back to RTC
*/
void
resettodr(void)
{
unsigned long tm;
int y, m, s;
if (disable_rtc_set)
return;
s = splclock();
tm = time_second;
splx(s);
/* Disable RTC updates and interrupts. */
writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);
/* Calculate local time to put in RTC */
tm -= tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
writertc(RTC_SEC, bin2bcd(tm%60)); tm /= 60; /* Write back Seconds */
writertc(RTC_MIN, bin2bcd(tm%60)); tm /= 60; /* Write back Minutes */
writertc(RTC_HRS, bin2bcd(tm%24)); tm /= 24; /* Write back Hours */
/* We now have the days since 01-01-1970 in tm */
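/*
 * Day 0 (Jan 1 1970) was a Thursday, and RTC_WDAY counts 1-7
 * starting at Sunday, so (tm + 4) % 7 + 1 maps day 0 to 5.
 */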
writertc(RTC_WDAY, (tm + 4) % 7 + 1); /* Write back Weekday */
for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y);
tm >= m;
y++, m = DAYSPERYEAR + LEAPYEAR(y))
tm -= m;
/* Now we have the years in y and the day-of-the-year in tm */
writertc(RTC_YEAR, bin2bcd(y%100)); /* Write back Year */
#ifdef USE_RTC_CENTURY
writertc(RTC_CENTURY, bin2bcd(y/100)); /* ... and Century */
#endif
for (m = 0; ; m++) {
int ml;
ml = daysinmonth[m];
if (m == 1 && LEAPYEAR(y))
ml++;
if (tm < ml)
break;
tm -= ml;
}
writertc(RTC_MONTH, bin2bcd(m + 1)); /* Write back Month */
writertc(RTC_DAY, bin2bcd(tm + 1)); /* Write back Month Day */
/* Reenable RTC updates and interrupts. */
writertc(RTC_STATUSB, rtc_statusb);
rtcin(RTC_INTR);
}
/*
* Start both clocks running.
*/
void
cpu_initclocks(void)
{
int diag;
#ifdef DEV_APIC
using_lapic_timer = lapic_setup_clock();
#endif
/*
* If we aren't using the local APIC timer to drive the kernel
* clocks, setup the interrupt handler for the 8254 timer 0 so
* that it can drive hardclock().
*/
if (!using_lapic_timer) {
intr_add_handler("clk", 0, (driver_intr_t *)clkintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
i8254_intsrc = intr_lookup_source(0);
if (i8254_intsrc != NULL)
i8254_pending =
i8254_intsrc->is_pic->pic_source_pending;
}
/* Initialize RTC. */
writertc(RTC_STATUSA, rtc_statusa);
writertc(RTC_STATUSB, RTCSB_24HR);
/*
* If the separate statistics clock hasn't been explicitly disabled
* and we aren't already using the local APIC timer to drive the
* kernel clocks, then setup the RTC to periodically interrupt to
* drive statclock() and profclock().
*/
if (!statclock_disable && !using_lapic_timer) {
diag = rtcin(RTC_DIAG);
if (diag != 0)
printf("RTC BIOS diagnostic error %b\n", diag, RTCDG_BITS);
/* Setting stathz to nonzero early helps avoid races. */
stathz = RTC_NOPROFRATE;
profhz = RTC_PROFRATE;
/* Enable periodic interrupts from the RTC. */
rtc_statusb |= RTCSB_PINTR;
intr_add_handler("rtc", 8, (driver_intr_t *)rtcintr, NULL,
INTR_TYPE_CLK | INTR_FAST, NULL);
writertc(RTC_STATUSB, rtc_statusb);
rtcin(RTC_INTR);
}
init_TSC_tc();
}
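
/*
 * Raise the RTC periodic interrupt to its profiling rate (RTC_PROFRATE,
 * normally 1024 Hz) while profiling is active; cpu_stopprofclock()
 * restores the slower statclock-only rate.
 */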
void
cpu_startprofclock(void)
{
if (using_lapic_timer)
return;
rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
writertc(RTC_STATUSA, rtc_statusa);
psdiv = pscnt = psratio;
}
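
/*
 * Drop the RTC back to the normal statclock rate (RTC_NOPROFRATE,
 * normally 128 Hz) when profiling stops.
 */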
void
cpu_stopprofclock(void)
{
if (using_lapic_timer)
return;
rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
writertc(RTC_STATUSA, rtc_statusa);
psdiv = pscnt = 1;
}
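
/*
 * Allow the i8254 frequency to be tuned at runtime.  A new value is
 * propagated to both the timer reprogramming code and the timecounter.
 */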
static int
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
{
int error;
u_int freq;
/*
* Use `i8254' instead of `timer' in external names because `timer'
	 * is too generic.  Should use `i8254' everywhere.
*/
freq = timer_freq;
error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
if (error == 0 && req->newptr != NULL) {
set_timer_freq(freq, hz);
i8254_timecounter.tc_frequency = freq;
}
return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW,
0, sizeof(u_int), sysctl_machdep_i8254_freq, "IU", "");
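
/*
 * Example usage from userland (1193182 Hz is the nominal i8254 input
 * clock; the hardware may be calibrated to a slightly different value):
 *	sysctl machdep.i8254_freq
 *	sysctl machdep.i8254_freq=1193182
 */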
static unsigned
i8254_get_timecount(struct timecounter *tc)
{
u_int count;
u_int high, low;
u_int eflags;
eflags = read_eflags();
mtx_lock_spin(&clock_lock);
/* Select timer0 and latch counter value. */
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
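	/* The latch returns the LSB of the count first, then the MSB. */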
low = inb(TIMER_CNTR0);
high = inb(TIMER_CNTR0);
count = timer0_max_count - ((high << 8) | low);
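	/*
	 * The 8254 counts down from timer0_max_count and wraps.  If the
	 * reading went backwards, or a wrap has occurred that the clock
	 * interrupt handler has not yet accounted for, credit one full
	 * period to the running offset so the timecounter stays monotonic.
	 */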
if (count < i8254_lastcount ||
(!i8254_ticked && (clkintr_pending ||
((count < 20 || (!(eflags & PSL_I) && count < timer0_max_count / 2u)) &&
i8254_pending != NULL && i8254_pending(i8254_intsrc))))) {
i8254_ticked = 1;
i8254_offset += timer0_max_count;
}
i8254_lastcount = count;
count += i8254_offset;
mtx_unlock_spin(&clock_lock);
return (count);
}
#ifdef DEV_ISA
/*
* Attach to the ISA PnP descriptors for the timer and realtime clock.
*/
static struct isa_pnp_id attimer_ids[] = {
{ 0x0001d041 /* PNP0100 */, "AT timer" },
{ 0x000bd041 /* PNP0B00 */, "AT realtime clock" },
{ 0 }
};
static int
attimer_probe(device_t dev)
{
int result;
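
	/*
	 * A PnP match returns <= 0; quiet the device since this stub
	 * driver exists only to claim the timer and RTC IDs.
	 */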
if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids)) <= 0)
device_quiet(dev);
	return (result);
}
static int
attimer_attach(device_t dev)
{
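	/* Nothing to do; the timer hardware was set up much earlier in boot. */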
	return (0);
}
static device_method_t attimer_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, attimer_probe),
DEVMETHOD(device_attach, attimer_attach),
DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend), /* XXX stop statclock? */
DEVMETHOD(device_resume, bus_generic_resume), /* XXX restart statclock? */
{ 0, 0 }
};
static driver_t attimer_driver = {
"attimer",
attimer_methods,
1, /* no softc */
};
static devclass_t attimer_devclass;
DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0);
DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0);
#endif /* DEV_ISA */