Mirror of https://git.FreeBSD.org/src.git
Synced 2024-12-04 09:09:56 +00:00
Remove the leading underscore from all symbols defined in x86 asm and used in
C or vice versa.  The elf compiler uses the same names for both.  Remove
asnames.h with great prejudice; it has served its purpose.

Note that this does not affect the ability to generate an aout kernel due to
gcc's -mno-underscores option.

moral support from:  peter, jhb
This commit is contained in:
parent e548a5c513
commit 02318dac2c

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
svn path=/head/; revision=73011
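
For readers coming to the diff cold, here is a minimal sketch of the naming convention at issue; the symbol names below (intr_example, intr_example_count) are hypothetical and not part of this commit. Under the a.out toolchain gcc prepended an underscore to every C-visible symbol, so assembly had to spell it _intr_example; the ELF toolchain emits the C name unchanged, so assembly and C now share one spelling:

	/* C side (hypothetical): extern int intr_example_count; void intr_example(void); */

	.text
	.globl	intr_example			/* ELF: same spelling as the C name */
	.type	intr_example,@function
intr_example:					/* a.out would have required _intr_example */
	incl	intr_example_count		/* likewise, formerly _intr_example_count */
	ret

	.data
	.globl	intr_example_count
intr_example_count:
	.long	0
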
@@ -50,27 +50,27 @@ IDTVEC(vec_name) ; \
FAKE_MCOUNT(13*4(%esp)) ; \
movl PCPU(CURPROC),%ebx ; \
incl P_INTR_NESTING_LEVEL(%ebx) ; \
pushl _intr_unit + (irq_num) * 4 ; \
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
pushl intr_unit + (irq_num) * 4 ; \
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
addl $4, %esp ; \
movl $0, _lapic+LA_EOI ; \
movl $0, lapic+LA_EOI ; \
lock ; \
incl _cnt+V_INTR ; /* book-keeping can wait */ \
movl _intr_countp + (irq_num) * 4, %eax ; \
incl cnt+V_INTR ; /* book-keeping can wait */ \
movl intr_countp + (irq_num) * 4, %eax ; \
lock ; \
incl (%eax) ; \
decl P_INTR_NESTING_LEVEL(%ebx) ; \
MEXITCOUNT ; \
jmp _doreti
jmp doreti

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num) \
IMASK_LOCK ; /* into critical reg */ \
testl $IRQ_BIT(irq_num), _apic_imen ; \
testl $IRQ_BIT(irq_num), apic_imen ; \
jne 7f ; /* masked, don't mask */ \
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
movl REDIRIDX(irq_num), %eax ; /* get the index */ \
movl %eax, (%ecx) ; /* write the index */ \
@@ -85,7 +85,7 @@ IDTVEC(vec_name) ; \
* and the EOI cycle would cause redundant INTs to occur.
*/
#define MASK_LEVEL_IRQ(irq_num) \
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
testl $IRQ_BIT(irq_num), apic_pin_trigger ; \
jz 9f ; /* edge, don't mask */ \
MASK_IRQ(irq_num) ; \
9:
@@ -93,18 +93,18 @@ IDTVEC(vec_name) ; \

#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num) \
movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
movl apic_isrbit_location + 8 * (irq_num), %eax ; \
movl (%eax), %eax ; \
testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
jz 9f ; /* not active */ \
movl $0, _lapic+LA_EOI ; \
movl $0, lapic+LA_EOI ; \
9:

#else
#define EOI_IRQ(irq_num) \
testl $IRQ_BIT(irq_num), _lapic+LA_ISR1; \
testl $IRQ_BIT(irq_num), lapic+LA_ISR1; \
jz 9f ; /* not active */ \
movl $0, _lapic+LA_EOI; \
movl $0, lapic+LA_EOI; \
9:
#endif

@@ -160,12 +160,12 @@ __CONCAT(Xresume,irq_num): ; \
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
pushl $irq_num; /* pass the IRQ */ \
sti ; \
call _sched_ithd ; \
call sched_ithd ; \
addl $4, %esp ; /* discard the parameter */ \
; \
decl P_INTR_NESTING_LEVEL(%ebx) ; \
MEXITCOUNT ; \
jmp _doreti
jmp doreti

/*
* Handle "spurious INTerrupts".
@@ -176,8 +176,8 @@ __CONCAT(Xresume,irq_num): ; \
*/
.text
SUPERALIGN_TEXT
.globl _Xspuriousint
_Xspuriousint:
.globl Xspuriousint
Xspuriousint:

/* No EOI cycle used here */

@@ -189,8 +189,8 @@ _Xspuriousint:
*/
.text
SUPERALIGN_TEXT
.globl _Xinvltlb
_Xinvltlb:
.globl Xinvltlb
Xinvltlb:
pushl %eax

#ifdef COUNT_XINVLTLB_HITS
@@ -207,7 +207,7 @@ _Xinvltlb:
movl %eax, %cr3

ss /* stack segment, avoid %ds load */
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */

popl %eax
iret
@@ -229,11 +229,11 @@ _Xinvltlb:

.text
SUPERALIGN_TEXT
.globl _Xcpucheckstate
.globl _checkstate_cpustate
.globl _checkstate_curproc
.globl _checkstate_pc
_Xcpucheckstate:
.globl Xcpucheckstate
.globl checkstate_cpustate
.globl checkstate_curproc
.globl checkstate_pc
Xcpucheckstate:
pushl %eax
pushl %ebx
pushl %ds /* save current data segment */
@@ -244,7 +244,7 @@ _Xcpucheckstate:
movl $KPSEL, %eax
mov %ax, %fs

movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */

movl $0, %ebx
movl 20(%esp), %eax
@@ -256,15 +256,15 @@ _Xcpucheckstate:
incl %ebx /* system or interrupt */
1:
movl PCPU(CPUID), %eax
movl %ebx, _checkstate_cpustate(,%eax,4)
movl %ebx, checkstate_cpustate(,%eax,4)
movl PCPU(CURPROC), %ebx
movl %ebx, _checkstate_curproc(,%eax,4)
movl %ebx, checkstate_curproc(,%eax,4)

movl 16(%esp), %ebx
movl %ebx, _checkstate_pc(,%eax,4)
movl %ebx, checkstate_pc(,%eax,4)

lock /* checkstate_probed_cpus |= (1<<id) */
btsl %eax, _checkstate_probed_cpus
btsl %eax, checkstate_probed_cpus

popl %fs
popl %ds /* restore previous data segment */
@@ -284,8 +284,8 @@ _Xcpucheckstate:

.text
SUPERALIGN_TEXT
.globl _Xcpuast
_Xcpuast:
.globl Xcpuast
Xcpuast:
PUSH_FRAME
movl $KDSEL, %eax
mov %ax, %ds /* use KERNEL data segment */
@@ -295,11 +295,11 @@ _Xcpuast:

movl PCPU(CPUID), %eax
lock /* checkstate_need_ast &= ~(1<<id) */
btrl %eax, _checkstate_need_ast
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
btrl %eax, checkstate_need_ast
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */

lock
btsl %eax, _checkstate_pending_ast
btsl %eax, checkstate_pending_ast
jc 1f

FAKE_MCOUNT(13*4(%esp))
@@ -310,7 +310,7 @@ _Xcpuast:

movl PCPU(CPUID), %eax
lock
btrl %eax, _checkstate_pending_ast
btrl %eax, checkstate_pending_ast
lock
btrl %eax, CNAME(resched_cpus)
jnc 2f
@@ -322,7 +322,7 @@ _Xcpuast:
lock
incl CNAME(cpuast_cnt)
MEXITCOUNT
jmp _doreti
jmp doreti
1:
/* We are already in the process of delivering an ast for this CPU */
POP_FRAME
@@ -338,8 +338,8 @@ _Xcpuast:

.text
SUPERALIGN_TEXT
.globl _Xcpustop
_Xcpustop:
.globl Xcpustop
Xcpustop:
pushl %ebp
movl %esp, %ebp
pushl %eax
@@ -353,7 +353,7 @@ _Xcpustop:
movl $KPSEL, %eax
mov %ax, %fs

movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */

movl PCPU(CPUID), %eax
imull $PCB_SIZE, %eax
@@ -366,15 +366,15 @@ _Xcpustop:
movl PCPU(CPUID), %eax

lock
btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
1:
btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
jnc 1b

lock
btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
lock
btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */

test %eax, %eax
jnz 2f
@@ -472,8 +472,8 @@ MCOUNT_LABEL(eintr)
*/
.text
SUPERALIGN_TEXT
.globl _Xrendezvous
_Xrendezvous:
.globl Xrendezvous
Xrendezvous:
PUSH_FRAME
movl $KDSEL, %eax
mov %ax, %ds /* use KERNEL data segment */
@@ -481,9 +481,9 @@ _Xrendezvous:
movl $KPSEL, %eax
mov %ax, %fs

call _smp_rendezvous_action
call smp_rendezvous_action

movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
POP_FRAME
iret

@@ -497,21 +497,21 @@ _xhits:
#endif /* COUNT_XINVLTLB_HITS */

/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
.globl _stopped_cpus, _started_cpus
_stopped_cpus:
.globl stopped_cpus, started_cpus
stopped_cpus:
.long 0
_started_cpus:
started_cpus:
.long 0

#ifdef BETTER_CLOCK
.globl _checkstate_probed_cpus
_checkstate_probed_cpus:
.globl checkstate_probed_cpus
checkstate_probed_cpus:
.long 0
#endif /* BETTER_CLOCK */
.globl _checkstate_need_ast
_checkstate_need_ast:
.globl checkstate_need_ast
checkstate_need_ast:
.long 0
_checkstate_pending_ast:
checkstate_pending_ast:
.long 0
.globl CNAME(resched_cpus)
.globl CNAME(want_resched_cnt)
@@ -526,8 +526,8 @@ CNAME(cpuast_cnt):
CNAME(cpustop_restartfunc):
.long 0

.globl _apic_pin_trigger
_apic_pin_trigger:
.globl apic_pin_trigger
apic_pin_trigger:
.long 0

.text
@@ -56,12 +56,12 @@

.data

.globl _panic
.globl panic

#if defined(SWTCH_OPTIM_STATS)
.globl _swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats: .long 0 /* number of _swtch_optims */
_tlb_flush_count: .long 0
.globl swtch_optim_stats, tlb_flush_count
swtch_optim_stats: .long 0 /* number of _swtch_optims */
tlb_flush_count: .long 0
#endif

.text
@@ -129,7 +129,7 @@ ENTRY(cpu_switch)
jne 1f
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
pushl %edx
call _npxsave /* do it in a big C function */
call npxsave /* do it in a big C function */
popl %eax
1:
#endif /* DEV_NPX */
@@ -139,7 +139,7 @@ sw1:

#ifdef SMP
/* Stop scheduling if smp_active goes zero and we are not BSP */
cmpl $0,_smp_active
cmpl $0,smp_active
jne 1f
cmpl $0,PCPU(CPUID)
je 1f
@@ -154,7 +154,7 @@ sw1:
* if it cannot find another process to run.
*/
sw1a:
call _chooseproc /* trash ecx, edx, ret eax*/
call chooseproc /* trash ecx, edx, ret eax*/

#ifdef INVARIANTS
testl %eax,%eax /* no process? */
@@ -171,15 +171,15 @@ sw1b:
movl P_ADDR(%ecx),%edx

#if defined(SWTCH_OPTIM_STATS)
incl _swtch_optim_stats
incl swtch_optim_stats
#endif
/* switch address space */
movl %cr3,%ebx
cmpl PCB_CR3(%edx),%ebx
je 4f
#if defined(SWTCH_OPTIM_STATS)
decl _swtch_optim_stats
incl _tlb_flush_count
decl swtch_optim_stats
incl tlb_flush_count
#endif
movl PCB_CR3(%edx),%ebx
movl %ebx,%cr3
@@ -188,7 +188,7 @@ sw1b:
movl PCPU(CPUID), %esi
cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
je 1f
btsl %esi, _private_tss /* mark use of private tss */
btsl %esi, private_tss /* mark use of private tss */
movl PCB_EXT(%edx), %edi /* new tss descriptor */
jmp 2f
1:
@@ -198,7 +198,7 @@ sw1b:
addl $(UPAGES * PAGE_SIZE - 16), %ebx
movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0

btrl %esi, _private_tss
btrl %esi, private_tss
jae 3f
PCPU_ADDR(COMMON_TSSD, %edi)
2:
@@ -227,9 +227,9 @@ sw1b:
#ifdef SMP
#ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
movl $0, _lapic+LA_TPR
movl $0, lapic+LA_TPR
#else
andl $~APIC_TPR_PRIO, _lapic+LA_TPR
andl $~APIC_TPR_PRIO, lapic+LA_TPR
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
#endif /* SMP */
@@ -242,14 +242,14 @@ sw1b:

cmpl $0, PCB_USERLDT(%edx)
jnz 1f
movl __default_ldt,%eax
movl _default_ldt,%eax
cmpl PCPU(CURRENTLDT),%eax
je 2f
lldt __default_ldt
lldt _default_ldt
movl %eax,PCPU(CURRENTLDT)
jmp 2f
1: pushl %edx
call _set_user_ldt
call set_user_ldt
popl %edx
2:

@@ -282,13 +282,13 @@ CROSSJUMPTARGET(sw1a)
#ifdef INVARIANTS
badsw2:
pushl $sw0_2
call _panic
call panic

sw0_2: .asciz "cpu_switch: not SRUN"

badsw3:
pushl $sw0_3
call _panic
call panic

sw0_3: .asciz "cpu_switch: chooseproc returned NULL"
#endif
@@ -337,7 +337,7 @@ ENTRY(savectx)
leal PCB_SAVEFPU(%eax),%eax
pushl %eax
pushl %eax
call _npxsave
call npxsave
addl $4,%esp
popl %eax
popl %ecx
@@ -346,7 +346,7 @@ ENTRY(savectx)
leal PCB_SAVEFPU(%ecx),%ecx
pushl %ecx
pushl %eax
call _bcopy
call bcopy
addl $12,%esp
#endif /* DEV_NPX */

@ -81,18 +81,18 @@
|
||||
* On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK. This means
|
||||
* that we must be careful in regards to accessing global variables. We
|
||||
* save (push) the current cpl (our software interrupt disable mask), call
|
||||
* the trap function, then call _doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). _doreti will determine if the restoration
|
||||
* the trap function, then call doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). doreti will determine if the restoration
|
||||
* of the cpl unmasked any pending interrupts and will issue those interrupts
|
||||
* synchronously prior to doing the iret.
|
||||
*
|
||||
* At the moment we must own the MP lock to do any cpl manipulation, which
|
||||
* means we must own it prior to calling _doreti. The syscall case attempts
|
||||
* means we must own it prior to calling doreti. The syscall case attempts
|
||||
* to avoid this by handling a reduced set of cases itself and iret'ing.
|
||||
*/
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(_X,name); \
|
||||
.type __CONCAT(_X,name),@function; __CONCAT(_X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp _alltraps
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
|
||||
.type __CONCAT(X,name),@function; __CONCAT(X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp alltraps
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
#define BDBTRAP(name) \
|
||||
@ -171,14 +171,14 @@ IDTVEC(fpu)
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
|
||||
MPLOCKED incl _cnt+V_TRAP
|
||||
MPLOCKED incl cnt+V_TRAP
|
||||
pushl $0 /* dummy unit to finish intr frame */
|
||||
|
||||
call _npx_intr
|
||||
call npx_intr
|
||||
|
||||
addl $4,%esp
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
#else /* DEV_NPX */
|
||||
pushl $0; TRAP(T_ARITHTRAP)
|
||||
#endif /* DEV_NPX */
|
||||
@ -187,16 +187,16 @@ IDTVEC(align)
|
||||
TRAP(T_ALIGNFLT)
|
||||
|
||||
/*
|
||||
* _alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* gate (TGT), else disabled if this was an interrupt gate (IGT).
|
||||
* Note that int0x80_syscall is a trap gate. Only page faults
|
||||
* use an interrupt gate.
|
||||
*/
|
||||
|
||||
SUPERALIGN_TEXT
|
||||
.globl _alltraps
|
||||
.type _alltraps,@function
|
||||
_alltraps:
|
||||
.globl alltraps
|
||||
.type alltraps,@function
|
||||
alltraps:
|
||||
pushal
|
||||
pushl %ds
|
||||
pushl %es
|
||||
@ -209,14 +209,14 @@ alltraps_with_regs_pushed:
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
calltrap:
|
||||
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
|
||||
call _trap
|
||||
FAKE_MCOUNT(btrap) /* init "from" btrap -> calltrap */
|
||||
call trap
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* SYSCALL CALL GATE (old entry point for a.out binaries)
|
||||
@ -265,27 +265,27 @@ syscall_with_err_pushed:
|
||||
mov $KPSEL,%ax
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
call _syscall
|
||||
call syscall
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
ENTRY(fork_trampoline)
|
||||
pushl %esp /* trapframe pointer */
|
||||
pushl %ebx /* arg1 */
|
||||
pushl %esi /* function */
|
||||
call _fork_exit
|
||||
call fork_exit
|
||||
addl $12,%esp
|
||||
/* cut from syscall */
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
|
||||
/*
|
||||
* Include vm86 call routines, which want to call _doreti.
|
||||
* Include vm86 call routines, which want to call doreti.
|
||||
*/
|
||||
#include "i386/i386/vm86bios.s"
|
||||
|
||||
|
@ -81,18 +81,18 @@
|
||||
* On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK. This means
|
||||
* that we must be careful in regards to accessing global variables. We
|
||||
* save (push) the current cpl (our software interrupt disable mask), call
|
||||
* the trap function, then call _doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). _doreti will determine if the restoration
|
||||
* the trap function, then call doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). doreti will determine if the restoration
|
||||
* of the cpl unmasked any pending interrupts and will issue those interrupts
|
||||
* synchronously prior to doing the iret.
|
||||
*
|
||||
* At the moment we must own the MP lock to do any cpl manipulation, which
|
||||
* means we must own it prior to calling _doreti. The syscall case attempts
|
||||
* means we must own it prior to calling doreti. The syscall case attempts
|
||||
* to avoid this by handling a reduced set of cases itself and iret'ing.
|
||||
*/
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(_X,name); \
|
||||
.type __CONCAT(_X,name),@function; __CONCAT(_X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp _alltraps
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
|
||||
.type __CONCAT(X,name),@function; __CONCAT(X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp alltraps
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
#define BDBTRAP(name) \
|
||||
@ -171,14 +171,14 @@ IDTVEC(fpu)
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
|
||||
MPLOCKED incl _cnt+V_TRAP
|
||||
MPLOCKED incl cnt+V_TRAP
|
||||
pushl $0 /* dummy unit to finish intr frame */
|
||||
|
||||
call _npx_intr
|
||||
call npx_intr
|
||||
|
||||
addl $4,%esp
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
#else /* DEV_NPX */
|
||||
pushl $0; TRAP(T_ARITHTRAP)
|
||||
#endif /* DEV_NPX */
|
||||
@ -187,16 +187,16 @@ IDTVEC(align)
|
||||
TRAP(T_ALIGNFLT)
|
||||
|
||||
/*
|
||||
* _alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* gate (TGT), else disabled if this was an interrupt gate (IGT).
|
||||
* Note that int0x80_syscall is a trap gate. Only page faults
|
||||
* use an interrupt gate.
|
||||
*/
|
||||
|
||||
SUPERALIGN_TEXT
|
||||
.globl _alltraps
|
||||
.type _alltraps,@function
|
||||
_alltraps:
|
||||
.globl alltraps
|
||||
.type alltraps,@function
|
||||
alltraps:
|
||||
pushal
|
||||
pushl %ds
|
||||
pushl %es
|
||||
@ -209,14 +209,14 @@ alltraps_with_regs_pushed:
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
calltrap:
|
||||
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
|
||||
call _trap
|
||||
FAKE_MCOUNT(btrap) /* init "from" btrap -> calltrap */
|
||||
call trap
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* SYSCALL CALL GATE (old entry point for a.out binaries)
|
||||
@ -265,27 +265,27 @@ syscall_with_err_pushed:
|
||||
mov $KPSEL,%ax
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
call _syscall
|
||||
call syscall
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
ENTRY(fork_trampoline)
|
||||
pushl %esp /* trapframe pointer */
|
||||
pushl %ebx /* arg1 */
|
||||
pushl %esi /* function */
|
||||
call _fork_exit
|
||||
call fork_exit
|
||||
addl $12,%esp
|
||||
/* cut from syscall */
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
|
||||
/*
|
||||
* Include vm86 call routines, which want to call _doreti.
|
||||
* Include vm86 call routines, which want to call doreti.
|
||||
*/
|
||||
#include "i386/i386/vm86bios.s"
|
||||
|
||||
|
@ -69,19 +69,19 @@
|
||||
* PTmap is recursive pagemap at top of virtual address space.
|
||||
* Within PTmap, the page directory can be found (third indirection).
|
||||
*/
|
||||
.globl _PTmap,_PTD,_PTDpde
|
||||
.set _PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set _PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
|
||||
.globl PTmap,PTD,PTDpde
|
||||
.set PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set PTDpde,PTD + (PTDPTDI * PDESIZE)
|
||||
|
||||
/*
|
||||
* APTmap, APTD is the alternate recursive pagemap.
|
||||
* It's used when modifying another process's page tables.
|
||||
*/
|
||||
.globl _APTmap,_APTD,_APTDpde
|
||||
.set _APTmap,APTDPTDI << PDRSHIFT
|
||||
.set _APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
|
||||
.globl APTmap,APTD,APTDpde
|
||||
.set APTmap,APTDPTDI << PDRSHIFT
|
||||
.set APTD,APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set APTDpde,PTD + (APTDPTDI * PDESIZE)
|
||||
|
||||
#ifdef SMP
|
||||
/*
|
||||
@ -89,9 +89,9 @@
|
||||
* This is "constructed" in locore.s on the BSP and in mp_machdep.c
|
||||
* for each AP. DO NOT REORDER THESE WITHOUT UPDATING THE REST!
|
||||
*/
|
||||
.globl _SMP_prvspace, _lapic
|
||||
.set _SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set _lapic,_SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
.globl SMP_prvspace, lapic
|
||||
.set SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
#endif /* SMP */
|
||||
|
||||
/*
|
||||
@ -104,48 +104,48 @@
|
||||
.space 0x2000 /* space for tmpstk - temporary stack */
|
||||
HIDENAME(tmpstk):
|
||||
|
||||
.globl _boothowto,_bootdev
|
||||
.globl boothowto,bootdev
|
||||
|
||||
.globl _cpu,_cpu_vendor,_cpu_id,_bootinfo
|
||||
.globl _cpu_high, _cpu_feature
|
||||
.globl cpu,cpu_vendor,cpu_id,bootinfo
|
||||
.globl cpu_high, cpu_feature
|
||||
|
||||
_cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
_cpu_id: .long 0 /* stepping ID */
|
||||
_cpu_high: .long 0 /* highest arg to CPUID */
|
||||
_cpu_feature: .long 0 /* features */
|
||||
_cpu_vendor: .space 20 /* CPU origin code */
|
||||
_bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
cpu_id: .long 0 /* stepping ID */
|
||||
cpu_high: .long 0 /* highest arg to CPUID */
|
||||
cpu_feature: .long 0 /* features */
|
||||
cpu_vendor: .space 20 /* CPU origin code */
|
||||
bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
|
||||
_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
physfree: .long 0 /* phys addr of next free page */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _cpu0prvpage
|
||||
.globl cpu0prvpage
|
||||
cpu0pp: .long 0 /* phys addr cpu0 private pg */
|
||||
_cpu0prvpage: .long 0 /* relocated version */
|
||||
cpu0prvpage: .long 0 /* relocated version */
|
||||
|
||||
.globl _SMPpt
|
||||
.globl SMPpt
|
||||
SMPptpa: .long 0 /* phys addr SMP page table */
|
||||
_SMPpt: .long 0 /* relocated version */
|
||||
SMPpt: .long 0 /* relocated version */
|
||||
#endif /* SMP */
|
||||
|
||||
.globl _IdlePTD
|
||||
_IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
.globl IdlePTD
|
||||
IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _KPTphys
|
||||
.globl KPTphys
|
||||
#endif
|
||||
_KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
|
||||
.globl _proc0paddr
|
||||
_proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
.globl proc0paddr
|
||||
proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
p0upa: .long 0 /* phys addr of proc0's UPAGES */
|
||||
|
||||
vm86phystk: .long 0 /* PA of vm86/bios stack */
|
||||
|
||||
.globl _vm86paddr, _vm86pa
|
||||
_vm86paddr: .long 0 /* address of vm86 region */
|
||||
_vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
.globl vm86paddr, vm86pa
|
||||
vm86paddr: .long 0 /* address of vm86 region */
|
||||
vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
.globl _bdb_exists /* flag to indicate BDE debugger is present */
|
||||
@ -153,8 +153,8 @@ _bdb_exists: .long 0
|
||||
#endif
|
||||
|
||||
#ifdef PC98
|
||||
.globl _pc98_system_parameter
|
||||
_pc98_system_parameter:
|
||||
.globl pc98_system_parameter
|
||||
pc98_system_parameter:
|
||||
.space 0x240
|
||||
#endif
|
||||
|
||||
@ -205,7 +205,7 @@ _pc98_system_parameter:
|
||||
#define fillkptphys(prot) \
|
||||
movl %eax, %ebx ; \
|
||||
shrl $PAGE_SHIFT, %ebx ; \
|
||||
fillkpt(R(_KPTphys), prot)
|
||||
fillkpt(R(KPTphys), prot)
|
||||
|
||||
.text
|
||||
/**********************************************************************
|
||||
@ -218,7 +218,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#ifdef PC98
|
||||
/* save SYSTEM PARAMETER for resume (NS/T or other) */
|
||||
movl $0xa1400,%esi
|
||||
movl $R(_pc98_system_parameter),%edi
|
||||
movl $R(pc98_system_parameter),%edi
|
||||
movl $0x0240,%ecx
|
||||
cld
|
||||
rep
|
||||
@ -266,10 +266,10 @@ NON_GPROF_ENTRY(btext)
|
||||
|
||||
#ifdef PC98
|
||||
/* pc98_machine_type & M_EPSON_PC98 */
|
||||
testb $0x02,R(_pc98_system_parameter)+220
|
||||
testb $0x02,R(pc98_system_parameter)+220
|
||||
jz 3f
|
||||
/* epson_machine_id <= 0x0b */
|
||||
cmpb $0x0b,R(_pc98_system_parameter)+224
|
||||
cmpb $0x0b,R(pc98_system_parameter)+224
|
||||
ja 3f
|
||||
|
||||
/* count up memory */
|
||||
@ -284,11 +284,11 @@ NON_GPROF_ENTRY(btext)
|
||||
loop 1b
|
||||
2: subl $0x100000,%eax
|
||||
shrl $17,%eax
|
||||
movb %al,R(_pc98_system_parameter)+1
|
||||
movb %al,R(pc98_system_parameter)+1
|
||||
3:
|
||||
|
||||
movw R(_pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(_cpu_id)
|
||||
movw R(pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(cpu_id)
|
||||
#endif
|
||||
|
||||
call identify_cpu
|
||||
@ -309,8 +309,8 @@ NON_GPROF_ENTRY(btext)
|
||||
* are above 1MB to keep the gdt and idt away from the bss and page
|
||||
* tables. The idt is only used if BDE_DEBUGGER is enabled.
|
||||
*/
|
||||
movl $R(_end),%ecx
|
||||
movl $R(_edata),%edi
|
||||
movl $R(end),%ecx
|
||||
movl $R(edata),%edi
|
||||
subl %edi,%ecx
|
||||
xorl %eax,%eax
|
||||
cld
|
||||
@ -322,7 +322,7 @@ NON_GPROF_ENTRY(btext)
|
||||
/*
|
||||
* If the CPU has support for VME, turn it on.
|
||||
*/
|
||||
testl $CPUID_VME, R(_cpu_feature)
|
||||
testl $CPUID_VME, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_VME, %eax
|
||||
@ -338,7 +338,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#endif
|
||||
|
||||
/* Now enable paging */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl %eax,%cr3 /* load ptd addr into mmu */
|
||||
movl %cr0,%eax /* get control word */
|
||||
orl $CR0_PE|CR0_PG,%eax /* enable paging */
|
||||
@ -359,16 +359,16 @@ NON_GPROF_ENTRY(btext)
|
||||
/* now running relocated at KERNBASE where the system is linked to run */
|
||||
begin:
|
||||
/* set up bootstrap stack */
|
||||
movl _proc0paddr,%eax /* location of in-kernel pages */
|
||||
movl proc0paddr,%eax /* location of in-kernel pages */
|
||||
leal UPAGES*PAGE_SIZE(%eax),%esp /* bootstrap stack end location */
|
||||
|
||||
xorl %ebp,%ebp /* mark end of frames */
|
||||
|
||||
movl _IdlePTD,%esi
|
||||
movl IdlePTD,%esi
|
||||
movl %esi,PCB_CR3(%eax)
|
||||
|
||||
pushl physfree /* value of first for init386(first) */
|
||||
call _init386 /* wire 386 chip for unix operation */
|
||||
call init386 /* wire 386 chip for unix operation */
|
||||
|
||||
/*
|
||||
* Clean up the stack in a way that db_numargs() understands, so
|
||||
@ -377,7 +377,7 @@ begin:
|
||||
*/
|
||||
addl $4,%esp
|
||||
|
||||
call _mi_startup /* autoconfiguration, mountroot etc */
|
||||
call mi_startup /* autoconfiguration, mountroot etc */
|
||||
/* NOTREACHED */
|
||||
addl $0,%esp /* for db_numargs() again */
|
||||
|
||||
@ -398,7 +398,7 @@ NON_GPROF_ENTRY(sigcode)
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_osigcode:
|
||||
osigcode:
|
||||
call *SIGF_HANDLER(%esp) /* call signal handler */
|
||||
lea SIGF_SC(%esp),%eax /* get sigcontext */
|
||||
pushl %eax
|
||||
@ -413,14 +413,14 @@ _osigcode:
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_esigcode:
|
||||
esigcode:
|
||||
|
||||
.data
|
||||
.globl _szsigcode, _szosigcode
|
||||
_szsigcode:
|
||||
.long _esigcode-_sigcode
|
||||
_szosigcode:
|
||||
.long _esigcode-_osigcode
|
||||
.globl szsigcode, szosigcode
|
||||
szsigcode:
|
||||
.long esigcode-sigcode
|
||||
szosigcode:
|
||||
.long esigcode-osigcode
|
||||
.text
|
||||
|
||||
/**********************************************************************
|
||||
@ -507,7 +507,7 @@ newboot:
|
||||
cmpl $0,%esi
|
||||
je 2f /* No kernelname */
|
||||
movl $MAXPATHLEN,%ecx /* Brute force!!! */
|
||||
movl $R(_kernelname),%edi
|
||||
movl $R(kernelname),%edi
|
||||
cmpb $'/',(%esi) /* Make sure it starts with a slash */
|
||||
je 1f
|
||||
movb $'/',(%edi)
|
||||
@ -535,7 +535,7 @@ got_bi_size:
|
||||
* Copy the common part of the bootinfo struct
|
||||
*/
|
||||
movl %ebx,%esi
|
||||
movl $R(_bootinfo),%edi
|
||||
movl $R(bootinfo),%edi
|
||||
cmpl $BOOTINFO_SIZE,%ecx
|
||||
jbe got_common_bi_size
|
||||
movl $BOOTINFO_SIZE,%ecx
|
||||
@ -552,12 +552,12 @@ got_common_bi_size:
|
||||
movl BI_NFS_DISKLESS(%ebx),%esi
|
||||
cmpl $0,%esi
|
||||
je olddiskboot
|
||||
movl $R(_nfs_diskless),%edi
|
||||
movl $R(nfs_diskless),%edi
|
||||
movl $NFSDISKLESS_SIZE,%ecx
|
||||
cld
|
||||
rep
|
||||
movsb
|
||||
movl $R(_nfs_diskless_valid),%edi
|
||||
movl $R(nfs_diskless_valid),%edi
|
||||
movl $1,(%edi)
|
||||
#endif
|
||||
#endif
|
||||
@ -570,9 +570,9 @@ got_common_bi_size:
|
||||
*/
|
||||
olddiskboot:
|
||||
movl 8(%ebp),%eax
|
||||
movl %eax,R(_boothowto)
|
||||
movl %eax,R(boothowto)
|
||||
movl 12(%ebp),%eax
|
||||
movl %eax,R(_bootdev)
|
||||
movl %eax,R(bootdev)
|
||||
|
||||
ret
|
||||
|
||||
@ -610,16 +610,16 @@ identify_cpu:
|
||||
divl %ecx
|
||||
jz trynexgen
|
||||
popfl
|
||||
movl $CPU_386,R(_cpu)
|
||||
movl $CPU_386,R(cpu)
|
||||
jmp 3f
|
||||
|
||||
trynexgen:
|
||||
popfl
|
||||
movl $CPU_NX586,R(_cpu)
|
||||
movl $0x4778654e,R(_cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(_cpu_vendor+4)
|
||||
movl $0x6e657669,R(_cpu_vendor+8)
|
||||
movl $0,R(_cpu_vendor+12)
|
||||
movl $CPU_NX586,R(cpu)
|
||||
movl $0x4778654e,R(cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(cpu_vendor+4)
|
||||
movl $0x6e657669,R(cpu_vendor+8)
|
||||
movl $0,R(cpu_vendor+12)
|
||||
jmp 3f
|
||||
|
||||
try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
@ -638,7 +638,7 @@ try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
|
||||
testl %eax,%eax
|
||||
jnz trycpuid
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
|
||||
/*
|
||||
* Check Cyrix CPU
|
||||
@ -665,41 +665,41 @@ trycyrix:
|
||||
* CPU, we couldn't distinguish it from Cyrix's (including IBM
|
||||
* brand of Cyrix CPUs).
|
||||
*/
|
||||
movl $0x69727943,R(_cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(_cpu_vendor+4)
|
||||
movl $0x64616574,R(_cpu_vendor+8)
|
||||
movl $0x69727943,R(cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(cpu_vendor+4)
|
||||
movl $0x64616574,R(cpu_vendor+8)
|
||||
jmp 3f
|
||||
|
||||
trycpuid: /* Use the `cpuid' instruction. */
|
||||
xorl %eax,%eax
|
||||
cpuid # cpuid 0
|
||||
movl %eax,R(_cpu_high) # highest capability
|
||||
movl %ebx,R(_cpu_vendor) # store vendor string
|
||||
movl %edx,R(_cpu_vendor+4)
|
||||
movl %ecx,R(_cpu_vendor+8)
|
||||
movb $0,R(_cpu_vendor+12)
|
||||
movl %eax,R(cpu_high) # highest capability
|
||||
movl %ebx,R(cpu_vendor) # store vendor string
|
||||
movl %edx,R(cpu_vendor+4)
|
||||
movl %ecx,R(cpu_vendor+8)
|
||||
movb $0,R(cpu_vendor+12)
|
||||
|
||||
movl $1,%eax
|
||||
cpuid # cpuid 1
|
||||
movl %eax,R(_cpu_id) # store cpu_id
|
||||
movl %edx,R(_cpu_feature) # store cpu_feature
|
||||
movl %eax,R(cpu_id) # store cpu_id
|
||||
movl %edx,R(cpu_feature) # store cpu_feature
|
||||
rorl $8,%eax # extract family type
|
||||
andl $15,%eax
|
||||
cmpl $5,%eax
|
||||
jae 1f
|
||||
|
||||
/* less than Pentium; must be 486 */
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
jmp 3f
|
||||
1:
|
||||
/* a Pentium? */
|
||||
cmpl $5,%eax
|
||||
jne 2f
|
||||
movl $CPU_586,R(_cpu)
|
||||
movl $CPU_586,R(cpu)
|
||||
jmp 3f
|
||||
2:
|
||||
/* Greater than Pentium...call it a Pentium Pro */
|
||||
movl $CPU_686,R(_cpu)
|
||||
movl $CPU_686,R(cpu)
|
||||
3:
|
||||
ret
|
||||
|
||||
@ -712,7 +712,7 @@ trycpuid: /* Use the `cpuid' instruction. */
|
||||
|
||||
create_pagetables:
|
||||
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_PGE, %eax
|
||||
@ -723,17 +723,17 @@ create_pagetables:
|
||||
movl $R(_end),%esi
|
||||
|
||||
/* Include symbols, if any. */
|
||||
movl R(_bootinfo+BI_ESYMTAB),%edi
|
||||
movl R(bootinfo+BI_ESYMTAB),%edi
|
||||
testl %edi,%edi
|
||||
je over_symalloc
|
||||
movl %edi,%esi
|
||||
movl $KERNBASE,%edi
|
||||
addl %edi,R(_bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(_bootinfo+BI_ESYMTAB)
|
||||
addl %edi,R(bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(bootinfo+BI_ESYMTAB)
|
||||
over_symalloc:
|
||||
|
||||
/* If we are told where the end of the kernel space is, believe it. */
|
||||
movl R(_bootinfo+BI_KERNEND),%edi
|
||||
movl R(bootinfo+BI_KERNEND),%edi
|
||||
testl %edi,%edi
|
||||
je no_kernend
|
||||
movl %edi,%esi
|
||||
@ -741,43 +741,43 @@ no_kernend:
|
||||
|
||||
addl $PAGE_MASK,%esi
|
||||
andl $~PAGE_MASK,%esi
|
||||
movl %esi,R(_KERNend) /* save end of kernel */
|
||||
movl %esi,R(KERNend) /* save end of kernel */
|
||||
movl %esi,R(physfree) /* next free page is at end of kernel */
|
||||
|
||||
/* Allocate Kernel Page Tables */
|
||||
ALLOCPAGES(NKPT)
|
||||
movl %esi,R(_KPTphys)
|
||||
movl %esi,R(KPTphys)
|
||||
|
||||
/* Allocate Page Table Directory */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(_IdlePTD)
|
||||
movl %esi,R(IdlePTD)
|
||||
|
||||
/* Allocate UPAGES */
|
||||
ALLOCPAGES(UPAGES)
|
||||
movl %esi,R(p0upa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_proc0paddr)
|
||||
movl %esi, R(proc0paddr)
|
||||
|
||||
ALLOCPAGES(1) /* vm86/bios stack */
|
||||
movl %esi,R(vm86phystk)
|
||||
|
||||
ALLOCPAGES(3) /* pgtable + ext + IOPAGES */
|
||||
movl %esi,R(_vm86pa)
|
||||
movl %esi,R(vm86pa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_vm86paddr)
|
||||
movl %esi, R(vm86paddr)
|
||||
|
||||
#ifdef SMP
|
||||
/* Allocate cpu0's private data page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(cpu0pp)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_cpu0prvpage) /* relocated to KVM space */
|
||||
movl %esi, R(cpu0prvpage) /* relocated to KVM space */
|
||||
|
||||
/* Allocate SMP page table page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(SMPptpa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_SMPpt) /* relocated to KVM space */
|
||||
movl %esi, R(SMPpt) /* relocated to KVM space */
|
||||
#endif /* SMP */
|
||||
|
||||
/* Map read-only from zero to the end of the kernel text section */
|
||||
@ -790,35 +790,35 @@ no_kernend:
|
||||
xorl %edx,%edx
|
||||
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 2f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
2: movl $R(_etext),%ecx
|
||||
2: movl $R(etext),%ecx
|
||||
addl $PAGE_MASK,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map read-write, data, bss and symbols */
|
||||
movl $R(_etext),%eax
|
||||
movl $R(etext),%eax
|
||||
addl $PAGE_MASK, %eax
|
||||
andl $~PAGE_MASK, %eax
|
||||
map_read_write:
|
||||
movl $PG_RW,%edx
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
1: movl R(_KERNend),%ecx
|
||||
1: movl R(KERNend),%ecx
|
||||
subl %eax,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map page directory. */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $1, %ecx
|
||||
fillkptphys($PG_RW)
|
||||
|
||||
@ -841,13 +841,13 @@ map_read_write:
|
||||
movl $0, %eax
|
||||
movl $0, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
/* ...likewise for the ISA hole */
|
||||
movl $ISA_HOLE_START, %eax
|
||||
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
|
||||
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
#ifdef SMP
|
||||
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
|
||||
@ -870,7 +870,7 @@ map_read_write:
|
||||
movl R(SMPptpa), %eax
|
||||
movl $MPPTDI, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* Fakeup VA for the local apic to allow early traps. */
|
||||
ALLOCPAGES(1)
|
||||
@ -881,22 +881,22 @@ map_read_write:
|
||||
#endif /* SMP */
|
||||
|
||||
/* install a pde for temporary double map of bottom of VA */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
xorl %ebx, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install pde's for pt's */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
movl $KPTDI, %ebx
|
||||
movl $NKPT, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install a pde recursively mapping page directory as a page table */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $PTDPTDI, %ebx
|
||||
movl $1,%ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
ret
|
||||
|
||||
@ -957,7 +957,7 @@ bdb_commit_paging:
|
||||
cmpl $0,_bdb_exists
|
||||
je bdb_commit_paging_exit
|
||||
|
||||
movl $_gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $9,%ecx
|
||||
reloc_gdt:
|
||||
movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */
|
||||
|
@ -69,19 +69,19 @@
|
||||
* PTmap is recursive pagemap at top of virtual address space.
|
||||
* Within PTmap, the page directory can be found (third indirection).
|
||||
*/
|
||||
.globl _PTmap,_PTD,_PTDpde
|
||||
.set _PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set _PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
|
||||
.globl PTmap,PTD,PTDpde
|
||||
.set PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set PTDpde,PTD + (PTDPTDI * PDESIZE)
|
||||
|
||||
/*
|
||||
* APTmap, APTD is the alternate recursive pagemap.
|
||||
* It's used when modifying another process's page tables.
|
||||
*/
|
||||
.globl _APTmap,_APTD,_APTDpde
|
||||
.set _APTmap,APTDPTDI << PDRSHIFT
|
||||
.set _APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
|
||||
.globl APTmap,APTD,APTDpde
|
||||
.set APTmap,APTDPTDI << PDRSHIFT
|
||||
.set APTD,APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set APTDpde,PTD + (APTDPTDI * PDESIZE)
|
||||
|
||||
#ifdef SMP
|
||||
/*
|
||||
@ -89,9 +89,9 @@
|
||||
* This is "constructed" in locore.s on the BSP and in mp_machdep.c
|
||||
* for each AP. DO NOT REORDER THESE WITHOUT UPDATING THE REST!
|
||||
*/
|
||||
.globl _SMP_prvspace, _lapic
|
||||
.set _SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set _lapic,_SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
.globl SMP_prvspace, lapic
|
||||
.set SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
#endif /* SMP */
|
||||
|
||||
/*
|
||||
@ -104,48 +104,48 @@
|
||||
.space 0x2000 /* space for tmpstk - temporary stack */
|
||||
HIDENAME(tmpstk):
|
||||
|
||||
.globl _boothowto,_bootdev
|
||||
.globl boothowto,bootdev
|
||||
|
||||
.globl _cpu,_cpu_vendor,_cpu_id,_bootinfo
|
||||
.globl _cpu_high, _cpu_feature
|
||||
.globl cpu,cpu_vendor,cpu_id,bootinfo
|
||||
.globl cpu_high, cpu_feature
|
||||
|
||||
_cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
_cpu_id: .long 0 /* stepping ID */
|
||||
_cpu_high: .long 0 /* highest arg to CPUID */
|
||||
_cpu_feature: .long 0 /* features */
|
||||
_cpu_vendor: .space 20 /* CPU origin code */
|
||||
_bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
cpu_id: .long 0 /* stepping ID */
|
||||
cpu_high: .long 0 /* highest arg to CPUID */
|
||||
cpu_feature: .long 0 /* features */
|
||||
cpu_vendor: .space 20 /* CPU origin code */
|
||||
bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
|
||||
_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
physfree: .long 0 /* phys addr of next free page */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _cpu0prvpage
|
||||
.globl cpu0prvpage
|
||||
cpu0pp: .long 0 /* phys addr cpu0 private pg */
|
||||
_cpu0prvpage: .long 0 /* relocated version */
|
||||
cpu0prvpage: .long 0 /* relocated version */
|
||||
|
||||
.globl _SMPpt
|
||||
.globl SMPpt
|
||||
SMPptpa: .long 0 /* phys addr SMP page table */
|
||||
_SMPpt: .long 0 /* relocated version */
|
||||
SMPpt: .long 0 /* relocated version */
|
||||
#endif /* SMP */
|
||||
|
||||
.globl _IdlePTD
|
||||
_IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
.globl IdlePTD
|
||||
IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _KPTphys
|
||||
.globl KPTphys
|
||||
#endif
|
||||
_KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
|
||||
.globl _proc0paddr
|
||||
_proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
.globl proc0paddr
|
||||
proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
p0upa: .long 0 /* phys addr of proc0's UPAGES */
|
||||
|
||||
vm86phystk: .long 0 /* PA of vm86/bios stack */
|
||||
|
||||
.globl _vm86paddr, _vm86pa
|
||||
_vm86paddr: .long 0 /* address of vm86 region */
|
||||
_vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
.globl vm86paddr, vm86pa
|
||||
vm86paddr: .long 0 /* address of vm86 region */
|
||||
vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
.globl _bdb_exists /* flag to indicate BDE debugger is present */
|
||||
@ -153,8 +153,8 @@ _bdb_exists: .long 0
|
||||
#endif
|
||||
|
||||
#ifdef PC98
|
||||
.globl _pc98_system_parameter
|
||||
_pc98_system_parameter:
|
||||
.globl pc98_system_parameter
|
||||
pc98_system_parameter:
|
||||
.space 0x240
|
||||
#endif
|
||||
|
||||
@ -205,7 +205,7 @@ _pc98_system_parameter:
|
||||
#define fillkptphys(prot) \
|
||||
movl %eax, %ebx ; \
|
||||
shrl $PAGE_SHIFT, %ebx ; \
|
||||
fillkpt(R(_KPTphys), prot)
|
||||
fillkpt(R(KPTphys), prot)
|
||||
|
||||
.text
|
||||
/**********************************************************************
|
||||
@ -218,7 +218,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#ifdef PC98
|
||||
/* save SYSTEM PARAMETER for resume (NS/T or other) */
|
||||
movl $0xa1400,%esi
|
||||
movl $R(_pc98_system_parameter),%edi
|
||||
movl $R(pc98_system_parameter),%edi
|
||||
movl $0x0240,%ecx
|
||||
cld
|
||||
rep
|
||||
@ -266,10 +266,10 @@ NON_GPROF_ENTRY(btext)
|
||||
|
||||
#ifdef PC98
|
||||
/* pc98_machine_type & M_EPSON_PC98 */
|
||||
testb $0x02,R(_pc98_system_parameter)+220
|
||||
testb $0x02,R(pc98_system_parameter)+220
|
||||
jz 3f
|
||||
/* epson_machine_id <= 0x0b */
|
||||
cmpb $0x0b,R(_pc98_system_parameter)+224
|
||||
cmpb $0x0b,R(pc98_system_parameter)+224
|
||||
ja 3f
|
||||
|
||||
/* count up memory */
|
||||
@ -284,11 +284,11 @@ NON_GPROF_ENTRY(btext)
|
||||
loop 1b
|
||||
2: subl $0x100000,%eax
|
||||
shrl $17,%eax
|
||||
movb %al,R(_pc98_system_parameter)+1
|
||||
movb %al,R(pc98_system_parameter)+1
|
||||
3:
|
||||
|
||||
movw R(_pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(_cpu_id)
|
||||
movw R(pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(cpu_id)
|
||||
#endif
|
||||
|
||||
call identify_cpu
|
||||
@ -309,8 +309,8 @@ NON_GPROF_ENTRY(btext)
|
||||
* are above 1MB to keep the gdt and idt away from the bss and page
|
||||
* tables. The idt is only used if BDE_DEBUGGER is enabled.
|
||||
*/
|
||||
movl $R(_end),%ecx
|
||||
movl $R(_edata),%edi
|
||||
movl $R(end),%ecx
|
||||
movl $R(edata),%edi
|
||||
subl %edi,%ecx
|
||||
xorl %eax,%eax
|
||||
cld
|
||||
@ -322,7 +322,7 @@ NON_GPROF_ENTRY(btext)
|
||||
/*
|
||||
* If the CPU has support for VME, turn it on.
|
||||
*/
|
||||
testl $CPUID_VME, R(_cpu_feature)
|
||||
testl $CPUID_VME, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_VME, %eax
|
||||
@ -338,7 +338,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#endif
|
||||
|
||||
/* Now enable paging */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl %eax,%cr3 /* load ptd addr into mmu */
|
||||
movl %cr0,%eax /* get control word */
|
||||
orl $CR0_PE|CR0_PG,%eax /* enable paging */
|
||||
@ -359,16 +359,16 @@ NON_GPROF_ENTRY(btext)
|
||||
/* now running relocated at KERNBASE where the system is linked to run */
|
||||
begin:
|
||||
/* set up bootstrap stack */
|
||||
movl _proc0paddr,%eax /* location of in-kernel pages */
|
||||
movl proc0paddr,%eax /* location of in-kernel pages */
|
||||
leal UPAGES*PAGE_SIZE(%eax),%esp /* bootstrap stack end location */
|
||||
|
||||
xorl %ebp,%ebp /* mark end of frames */
|
||||
|
||||
movl _IdlePTD,%esi
|
||||
movl IdlePTD,%esi
|
||||
movl %esi,PCB_CR3(%eax)
|
||||
|
||||
pushl physfree /* value of first for init386(first) */
|
||||
call _init386 /* wire 386 chip for unix operation */
|
||||
call init386 /* wire 386 chip for unix operation */
|
||||
|
||||
/*
|
||||
* Clean up the stack in a way that db_numargs() understands, so
|
||||
@ -377,7 +377,7 @@ begin:
|
||||
*/
|
||||
addl $4,%esp
|
||||
|
||||
call _mi_startup /* autoconfiguration, mountroot etc */
|
||||
call mi_startup /* autoconfiguration, mountroot etc */
|
||||
/* NOTREACHED */
|
||||
addl $0,%esp /* for db_numargs() again */
|
||||
|
||||
@ -398,7 +398,7 @@ NON_GPROF_ENTRY(sigcode)
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_osigcode:
|
||||
osigcode:
|
||||
call *SIGF_HANDLER(%esp) /* call signal handler */
|
||||
lea SIGF_SC(%esp),%eax /* get sigcontext */
|
||||
pushl %eax
|
||||
@ -413,14 +413,14 @@ _osigcode:
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_esigcode:
|
||||
esigcode:
|
||||
|
||||
.data
|
||||
.globl _szsigcode, _szosigcode
|
||||
_szsigcode:
|
||||
.long _esigcode-_sigcode
|
||||
_szosigcode:
|
||||
.long _esigcode-_osigcode
|
||||
.globl szsigcode, szosigcode
|
||||
szsigcode:
|
||||
.long esigcode-sigcode
|
||||
szosigcode:
|
||||
.long esigcode-osigcode
|
||||
.text
|
||||
|
||||
/**********************************************************************
|
||||
@ -507,7 +507,7 @@ newboot:
|
||||
cmpl $0,%esi
|
||||
je 2f /* No kernelname */
|
||||
movl $MAXPATHLEN,%ecx /* Brute force!!! */
|
||||
movl $R(_kernelname),%edi
|
||||
movl $R(kernelname),%edi
|
||||
cmpb $'/',(%esi) /* Make sure it starts with a slash */
|
||||
je 1f
|
||||
movb $'/',(%edi)
|
||||
@ -535,7 +535,7 @@ got_bi_size:
|
||||
* Copy the common part of the bootinfo struct
|
||||
*/
|
||||
movl %ebx,%esi
|
||||
movl $R(_bootinfo),%edi
|
||||
movl $R(bootinfo),%edi
|
||||
cmpl $BOOTINFO_SIZE,%ecx
|
||||
jbe got_common_bi_size
|
||||
movl $BOOTINFO_SIZE,%ecx
|
||||
@ -552,12 +552,12 @@ got_common_bi_size:
|
||||
movl BI_NFS_DISKLESS(%ebx),%esi
|
||||
cmpl $0,%esi
|
||||
je olddiskboot
|
||||
movl $R(_nfs_diskless),%edi
|
||||
movl $R(nfs_diskless),%edi
|
||||
movl $NFSDISKLESS_SIZE,%ecx
|
||||
cld
|
||||
rep
|
||||
movsb
|
||||
movl $R(_nfs_diskless_valid),%edi
|
||||
movl $R(nfs_diskless_valid),%edi
|
||||
movl $1,(%edi)
|
||||
#endif
|
||||
#endif
|
||||
@ -570,9 +570,9 @@ got_common_bi_size:
|
||||
*/
|
||||
olddiskboot:
|
||||
movl 8(%ebp),%eax
|
||||
movl %eax,R(_boothowto)
|
||||
movl %eax,R(boothowto)
|
||||
movl 12(%ebp),%eax
|
||||
movl %eax,R(_bootdev)
|
||||
movl %eax,R(bootdev)
|
||||
|
||||
ret
|
||||
|
||||
@ -610,16 +610,16 @@ identify_cpu:
|
||||
divl %ecx
|
||||
jz trynexgen
|
||||
popfl
|
||||
movl $CPU_386,R(_cpu)
|
||||
movl $CPU_386,R(cpu)
|
||||
jmp 3f
|
||||
|
||||
trynexgen:
|
||||
popfl
|
||||
movl $CPU_NX586,R(_cpu)
|
||||
movl $0x4778654e,R(_cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(_cpu_vendor+4)
|
||||
movl $0x6e657669,R(_cpu_vendor+8)
|
||||
movl $0,R(_cpu_vendor+12)
|
||||
movl $CPU_NX586,R(cpu)
|
||||
movl $0x4778654e,R(cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(cpu_vendor+4)
|
||||
movl $0x6e657669,R(cpu_vendor+8)
|
||||
movl $0,R(cpu_vendor+12)
|
||||
jmp 3f
|
||||
|
||||
try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
@ -638,7 +638,7 @@ try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
|
||||
testl %eax,%eax
|
||||
jnz trycpuid
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
|
||||
/*
|
||||
* Check Cyrix CPU
|
||||
@ -665,41 +665,41 @@ trycyrix:
|
||||
* CPU, we couldn't distinguish it from Cyrix's (including IBM
|
||||
* brand of Cyrix CPUs).
|
||||
*/
|
||||
movl $0x69727943,R(_cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(_cpu_vendor+4)
|
||||
movl $0x64616574,R(_cpu_vendor+8)
|
||||
movl $0x69727943,R(cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(cpu_vendor+4)
|
||||
movl $0x64616574,R(cpu_vendor+8)
|
||||
jmp 3f
|
||||
|
||||
trycpuid: /* Use the `cpuid' instruction. */
|
||||
xorl %eax,%eax
|
||||
cpuid # cpuid 0
|
||||
movl %eax,R(_cpu_high) # highest capability
|
||||
movl %ebx,R(_cpu_vendor) # store vendor string
|
||||
movl %edx,R(_cpu_vendor+4)
|
||||
movl %ecx,R(_cpu_vendor+8)
|
||||
movb $0,R(_cpu_vendor+12)
|
||||
movl %eax,R(cpu_high) # highest capability
|
||||
movl %ebx,R(cpu_vendor) # store vendor string
|
||||
movl %edx,R(cpu_vendor+4)
|
||||
movl %ecx,R(cpu_vendor+8)
|
||||
movb $0,R(cpu_vendor+12)
|
||||
|
||||
movl $1,%eax
|
||||
cpuid # cpuid 1
|
||||
movl %eax,R(_cpu_id) # store cpu_id
|
||||
movl %edx,R(_cpu_feature) # store cpu_feature
|
||||
movl %eax,R(cpu_id) # store cpu_id
|
||||
movl %edx,R(cpu_feature) # store cpu_feature
|
||||
rorl $8,%eax # extract family type
|
||||
andl $15,%eax
|
||||
cmpl $5,%eax
|
||||
jae 1f
|
||||
|
||||
/* less than Pentium; must be 486 */
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
jmp 3f
|
||||
1:
|
||||
/* a Pentium? */
|
||||
cmpl $5,%eax
|
||||
jne 2f
|
||||
movl $CPU_586,R(_cpu)
|
||||
movl $CPU_586,R(cpu)
|
||||
jmp 3f
|
||||
2:
|
||||
/* Greater than Pentium...call it a Pentium Pro */
|
||||
movl $CPU_686,R(_cpu)
|
||||
movl $CPU_686,R(cpu)
|
||||
3:
|
||||
ret
|
||||
|
||||
@ -712,7 +712,7 @@ trycpuid: /* Use the `cpuid' instruction. */
|
||||
|
||||
create_pagetables:
|
||||
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_PGE, %eax
|
||||
@ -723,17 +723,17 @@ create_pagetables:
|
||||
movl $R(_end),%esi
|
||||
|
||||
/* Include symbols, if any. */
|
||||
movl R(_bootinfo+BI_ESYMTAB),%edi
|
||||
movl R(bootinfo+BI_ESYMTAB),%edi
|
||||
testl %edi,%edi
|
||||
je over_symalloc
|
||||
movl %edi,%esi
|
||||
movl $KERNBASE,%edi
|
||||
addl %edi,R(_bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(_bootinfo+BI_ESYMTAB)
|
||||
addl %edi,R(bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(bootinfo+BI_ESYMTAB)
|
||||
over_symalloc:
|
||||
|
||||
/* If we are told where the end of the kernel space is, believe it. */
|
||||
movl R(_bootinfo+BI_KERNEND),%edi
|
||||
movl R(bootinfo+BI_KERNEND),%edi
|
||||
testl %edi,%edi
|
||||
je no_kernend
|
||||
movl %edi,%esi
|
||||
@ -741,43 +741,43 @@ no_kernend:
|
||||
|
||||
addl $PAGE_MASK,%esi
|
||||
andl $~PAGE_MASK,%esi
|
||||
movl %esi,R(_KERNend) /* save end of kernel */
|
||||
movl %esi,R(KERNend) /* save end of kernel */
|
||||
movl %esi,R(physfree) /* next free page is at end of kernel */
|
||||
|
||||
/* Allocate Kernel Page Tables */
|
||||
ALLOCPAGES(NKPT)
|
||||
movl %esi,R(_KPTphys)
|
||||
movl %esi,R(KPTphys)
|
||||
|
||||
/* Allocate Page Table Directory */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(_IdlePTD)
|
||||
movl %esi,R(IdlePTD)
|
||||
|
||||
/* Allocate UPAGES */
|
||||
ALLOCPAGES(UPAGES)
|
||||
movl %esi,R(p0upa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_proc0paddr)
|
||||
movl %esi, R(proc0paddr)
|
||||
|
||||
ALLOCPAGES(1) /* vm86/bios stack */
|
||||
movl %esi,R(vm86phystk)
|
||||
|
||||
ALLOCPAGES(3) /* pgtable + ext + IOPAGES */
|
||||
movl %esi,R(_vm86pa)
|
||||
movl %esi,R(vm86pa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_vm86paddr)
|
||||
movl %esi, R(vm86paddr)
|
||||
|
||||
#ifdef SMP
|
||||
/* Allocate cpu0's private data page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(cpu0pp)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_cpu0prvpage) /* relocated to KVM space */
|
||||
movl %esi, R(cpu0prvpage) /* relocated to KVM space */
|
||||
|
||||
/* Allocate SMP page table page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(SMPptpa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_SMPpt) /* relocated to KVM space */
|
||||
movl %esi, R(SMPpt) /* relocated to KVM space */
|
||||
#endif /* SMP */
|
||||
|
||||
/* Map read-only from zero to the end of the kernel text section */
|
||||
@ -790,35 +790,35 @@ no_kernend:
|
||||
xorl %edx,%edx
|
||||
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 2f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
2: movl $R(_etext),%ecx
|
||||
2: movl $R(etext),%ecx
|
||||
addl $PAGE_MASK,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map read-write, data, bss and symbols */
|
||||
movl $R(_etext),%eax
|
||||
movl $R(etext),%eax
|
||||
addl $PAGE_MASK, %eax
|
||||
andl $~PAGE_MASK, %eax
|
||||
map_read_write:
|
||||
movl $PG_RW,%edx
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
1: movl R(_KERNend),%ecx
|
||||
1: movl R(KERNend),%ecx
|
||||
subl %eax,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map page directory. */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $1, %ecx
|
||||
fillkptphys($PG_RW)
|
||||
|
||||
@ -841,13 +841,13 @@ map_read_write:
|
||||
movl $0, %eax
|
||||
movl $0, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
/* ...likewise for the ISA hole */
|
||||
movl $ISA_HOLE_START, %eax
|
||||
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
|
||||
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
#ifdef SMP
|
||||
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
|
||||
@ -870,7 +870,7 @@ map_read_write:
|
||||
movl R(SMPptpa), %eax
|
||||
movl $MPPTDI, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* Fakeup VA for the local apic to allow early traps. */
|
||||
ALLOCPAGES(1)
|
||||
@ -881,22 +881,22 @@ map_read_write:
|
||||
#endif /* SMP */
|
||||
|
||||
/* install a pde for temporary double map of bottom of VA */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
xorl %ebx, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install pde's for pt's */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
movl $KPTDI, %ebx
|
||||
movl $NKPT, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install a pde recursively mapping page directory as a page table */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $PTDPTDI, %ebx
|
||||
movl $1,%ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
ret
|
||||
|
||||
@ -957,7 +957,7 @@ bdb_commit_paging:
|
||||
cmpl $0,_bdb_exists
|
||||
je bdb_commit_paging_exit
|
||||
|
||||
movl $_gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $9,%ecx
|
||||
reloc_gdt:
|
||||
movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */
|
||||
|
@ -74,12 +74,12 @@
|
||||
NON_GPROF_ENTRY(MPentry)
|
||||
CHECKPOINT(0x36, 3)
|
||||
/* Now enable paging mode */
|
||||
movl _IdlePTD-KERNBASE, %eax
|
||||
movl IdlePTD-KERNBASE, %eax
|
||||
movl %eax,%cr3
|
||||
movl %cr0,%eax
|
||||
orl $CR0_PE|CR0_PG,%eax /* enable paging */
|
||||
movl %eax,%cr0 /* let the games begin! */
|
||||
movl _bootSTK,%esp /* boot stack end loc. */
|
||||
movl bootSTK,%esp /* boot stack end loc. */
|
||||
|
||||
pushl $mp_begin /* jump to high mem */
|
||||
ret
|
||||
@ -89,13 +89,13 @@ NON_GPROF_ENTRY(MPentry)
|
||||
*/
|
||||
mp_begin: /* now running relocated at KERNBASE */
|
||||
CHECKPOINT(0x37, 4)
|
||||
call _init_secondary /* load i386 tables */
|
||||
call init_secondary /* load i386 tables */
|
||||
CHECKPOINT(0x38, 5)
|
||||
|
||||
/*
|
||||
* If the [BSP] CPU has support for VME, turn it on.
|
||||
*/
|
||||
testl $CPUID_VME, _cpu_feature /* XXX WRONG! BSP! */
|
||||
testl $CPUID_VME, cpu_feature /* XXX WRONG! BSP! */
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_VME, %eax
|
||||
@ -103,19 +103,19 @@ mp_begin: /* now running relocated at KERNBASE */
|
||||
1:
|
||||
|
||||
/* disable the APIC, just to be SURE */
|
||||
movl _lapic+LA_SVR, %eax /* get spurious vector reg. */
|
||||
movl lapic+LA_SVR, %eax /* get spurious vector reg. */
|
||||
andl $~APIC_SVR_SWEN, %eax /* clear software enable bit */
|
||||
movl %eax, _lapic+LA_SVR
|
||||
movl %eax, lapic+LA_SVR
|
||||
|
||||
/* signal our startup to the BSP */
|
||||
movl _lapic+LA_VER, %eax /* our version reg contents */
|
||||
movl %eax, _cpu_apic_versions /* into [ 0 ] */
|
||||
incl _mp_ncpus /* signal BSP */
|
||||
movl lapic+LA_VER, %eax /* our version reg contents */
|
||||
movl %eax, cpu_apic_versions /* into [ 0 ] */
|
||||
incl mp_ncpus /* signal BSP */
|
||||
|
||||
CHECKPOINT(0x39, 6)
|
||||
|
||||
/* Now, let's prepare for some REAL WORK :-) This doesn't return. */
|
||||
call _ap_init
|
||||
call ap_init
|
||||
|
||||
/*
|
||||
* This is the embedded trampoline or bootstrap that is
|
||||
@ -150,10 +150,10 @@ NON_GPROF_ENTRY(bootMP)
|
||||
mov %ax, %fs
|
||||
mov %ax, %gs
|
||||
mov %ax, %ss
|
||||
mov $(boot_stk-_bootMP), %esp
|
||||
mov $(boot_stk-bootMP), %esp
|
||||
|
||||
/* Now load the global descriptor table */
|
||||
lgdt MP_GDTptr-_bootMP
|
||||
lgdt MP_GDTptr-bootMP
|
||||
|
||||
/* Enable protected mode */
|
||||
movl %cr0, %eax
|
||||
@ -165,7 +165,7 @@ NON_GPROF_ENTRY(bootMP)
|
||||
* reload CS register
|
||||
*/
|
||||
pushl $0x18
|
||||
pushl $(protmode-_bootMP)
|
||||
pushl $(protmode-bootMP)
|
||||
lretl
|
||||
|
||||
.code32
|
||||
@ -188,8 +188,8 @@ protmode:
|
||||
movw %bx, %gs
|
||||
movw %bx, %ss
|
||||
|
||||
.globl _bigJump
|
||||
_bigJump:
|
||||
.globl bigJump
|
||||
bigJump:
|
||||
/* this will be modified by mpInstallTramp() */
|
||||
ljmp $0x08, $0 /* far jmp to MPentry() */
|
||||
|
||||
@ -200,10 +200,10 @@ dead: hlt /* We should never get here */
|
||||
* MP boot strap Global Descriptor Table
|
||||
*/
|
||||
.p2align 4
|
||||
.globl _MP_GDT
|
||||
.globl _bootCodeSeg
|
||||
.globl _bootDataSeg
|
||||
_MP_GDT:
|
||||
.globl MP_GDT
|
||||
.globl bootCodeSeg
|
||||
.globl bootDataSeg
|
||||
MP_GDT:
|
||||
|
||||
nulldesc: /* offset = 0x0 */
|
||||
|
||||
@ -235,7 +235,7 @@ kerneldata: /* offset = 0x10 */
|
||||
bootcode: /* offset = 0x18 */
|
||||
|
||||
.word 0xffff /* segment limit 0..15 */
|
||||
_bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
.word 0x0000 /* segment base 0..15 */
|
||||
.byte 0x00 /* segment base 16...23; set for 0x000xx000 */
|
||||
.byte 0x9e /* flags; Type */
|
||||
@ -245,7 +245,7 @@ _bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootdata: /* offset = 0x20 */
|
||||
|
||||
.word 0xffff
|
||||
_bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
.word 0x0000 /* segment base 0..15 */
|
||||
.byte 0x00 /* segment base 16...23; set for 0x000xx000 */
|
||||
.byte 0x92
|
||||
@ -255,18 +255,18 @@ _bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
/*
|
||||
* GDT pointer for the lgdt call
|
||||
*/
|
||||
.globl _mp_gdtbase
|
||||
.globl mp_gdtbase
|
||||
|
||||
MP_GDTptr:
|
||||
_mp_gdtlimit:
|
||||
mp_gdtlimit:
|
||||
.word 0x0028
|
||||
_mp_gdtbase: /* this will be modified by mpInstallTramp() */
|
||||
mp_gdtbase: /* this will be modified by mpInstallTramp() */
|
||||
.long 0
|
||||
|
||||
.space 0x100 /* space for boot_stk - 1st temporary stack */
|
||||
boot_stk:
|
||||
|
||||
BOOTMP2:
|
||||
.globl _bootMP_size
|
||||
_bootMP_size:
|
||||
.globl bootMP_size
|
||||
bootMP_size:
|
||||
.long BOOTMP2 - BOOTMP1
|
||||
|
@ -45,21 +45,21 @@
|
||||
#define IDXSHIFT 10
|
||||
|
||||
.data
|
||||
.globl _bcopy_vector
|
||||
_bcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl _bzero
|
||||
_bzero:
|
||||
.long _generic_bzero
|
||||
.globl _copyin_vector
|
||||
_copyin_vector:
|
||||
.long _generic_copyin
|
||||
.globl _copyout_vector
|
||||
_copyout_vector:
|
||||
.long _generic_copyout
|
||||
.globl _ovbcopy_vector
|
||||
_ovbcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl bcopy_vector
|
||||
bcopy_vector:
|
||||
.long generic_bcopy
|
||||
.globl bzero
|
||||
bzero:
|
||||
.long generic_bzero
|
||||
.globl copyin_vector
|
||||
copyin_vector:
|
||||
.long generic_copyin
|
||||
.globl copyout_vector
|
||||
copyout_vector:
|
||||
.long generic_copyout
|
||||
.globl ovbcopy_vector
|
||||
ovbcopy_vector:
|
||||
.long generic_bcopy
|
||||
#if defined(I586_CPU) && defined(DEV_NPX)
|
||||
kernel_fpu_lock:
|
||||
.byte 0xfe
|
||||
@ -428,11 +428,11 @@ ENTRY(bcopyb)
|
||||
|
||||
ENTRY(bcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_bcopy_vector
|
||||
jmp *bcopy_vector
|
||||
|
||||
ENTRY(ovbcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_ovbcopy_vector
|
||||
jmp *ovbcopy_vector
|
||||
|
||||
/*
|
||||
* generic_bcopy(src, dst, cnt)
|
||||
@ -667,7 +667,7 @@ ENTRY(memcpy)
|
||||
*/
|
||||
ENTRY(copyout)
|
||||
MEXITCOUNT
|
||||
jmp *_copyout_vector
|
||||
jmp *copyout_vector
|
||||
|
||||
ENTRY(generic_copyout)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -725,12 +725,12 @@ ENTRY(generic_copyout)
|
||||
|
||||
1:
|
||||
/* check PTE for each page */
|
||||
leal _PTmap(%edx),%eax
|
||||
leal PTmap(%edx),%eax
|
||||
shrl $IDXSHIFT,%eax
|
||||
andb $0xfc,%al
|
||||
testb $PG_V,_PTmap(%eax) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%eax) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%al
|
||||
movb PTmap(%edx),%al
|
||||
andb $PG_V|PG_RW|PG_U,%al /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%al
|
||||
je 2f
|
||||
@ -741,7 +741,7 @@ ENTRY(generic_copyout)
|
||||
pushl %ecx
|
||||
shll $IDXSHIFT,%edx
|
||||
pushl %edx
|
||||
call _trapwrite /* trapwrite(addr) */
|
||||
call trapwrite /* trapwrite(addr) */
|
||||
popl %edx
|
||||
popl %ecx
|
||||
popl %edx
|
||||
@ -839,7 +839,7 @@ ENTRY(i586_copyout)
|
||||
jb slow_copyout
|
||||
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $4,%esp
|
||||
jmp done_copyout
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -849,7 +849,7 @@ ENTRY(i586_copyout)
|
||||
*/
|
||||
ENTRY(copyin)
|
||||
MEXITCOUNT
|
||||
jmp *_copyin_vector
|
||||
jmp *copyin_vector
|
||||
|
||||
ENTRY(generic_copyin)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -933,7 +933,7 @@ ENTRY(i586_copyin)
|
||||
|
||||
pushl %ebx /* XXX prepare for fastmove_fault */
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $8,%esp
|
||||
jmp done_copyin
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -1209,12 +1209,12 @@ ENTRY(suword)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1222,7 +1222,7 @@ ENTRY(suword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1258,9 +1258,9 @@ ENTRY(susword)
|
||||
leal _PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1268,7 +1268,7 @@ ENTRY(susword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1301,12 +1301,12 @@ ENTRY(subyte)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1314,7 +1314,7 @@ ENTRY(subyte)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1564,7 +1564,7 @@ ENTRY(rcr3)
|
||||
/* void load_cr3(caddr_t cr3) */
|
||||
ENTRY(load_cr3)
|
||||
#ifdef SWTCH_OPTIM_STATS
|
||||
incl _tlb_flush_count
|
||||
incl tlb_flush_count
|
||||
#endif
|
||||
movl 4(%esp),%eax
|
||||
movl %eax,%cr3
|
||||
|
@ -45,21 +45,21 @@
|
||||
#define IDXSHIFT 10
|
||||
|
||||
.data
|
||||
.globl _bcopy_vector
|
||||
_bcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl _bzero
|
||||
_bzero:
|
||||
.long _generic_bzero
|
||||
.globl _copyin_vector
|
||||
_copyin_vector:
|
||||
.long _generic_copyin
|
||||
.globl _copyout_vector
|
||||
_copyout_vector:
|
||||
.long _generic_copyout
|
||||
.globl _ovbcopy_vector
|
||||
_ovbcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl bcopy_vector
|
||||
bcopy_vector:
|
||||
.long generic_bcopy
|
||||
.globl bzero
|
||||
bzero:
|
||||
.long generic_bzero
|
||||
.globl copyin_vector
|
||||
copyin_vector:
|
||||
.long generic_copyin
|
||||
.globl copyout_vector
|
||||
copyout_vector:
|
||||
.long generic_copyout
|
||||
.globl ovbcopy_vector
|
||||
ovbcopy_vector:
|
||||
.long generic_bcopy
|
||||
#if defined(I586_CPU) && defined(DEV_NPX)
|
||||
kernel_fpu_lock:
|
||||
.byte 0xfe
|
||||
@ -428,11 +428,11 @@ ENTRY(bcopyb)
|
||||
|
||||
ENTRY(bcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_bcopy_vector
|
||||
jmp *bcopy_vector
|
||||
|
||||
ENTRY(ovbcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_ovbcopy_vector
|
||||
jmp *ovbcopy_vector
|
||||
|
||||
/*
|
||||
* generic_bcopy(src, dst, cnt)
|
||||
@ -667,7 +667,7 @@ ENTRY(memcpy)
|
||||
*/
|
||||
ENTRY(copyout)
|
||||
MEXITCOUNT
|
||||
jmp *_copyout_vector
|
||||
jmp *copyout_vector
|
||||
|
||||
ENTRY(generic_copyout)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -725,12 +725,12 @@ ENTRY(generic_copyout)
|
||||
|
||||
1:
|
||||
/* check PTE for each page */
|
||||
leal _PTmap(%edx),%eax
|
||||
leal PTmap(%edx),%eax
|
||||
shrl $IDXSHIFT,%eax
|
||||
andb $0xfc,%al
|
||||
testb $PG_V,_PTmap(%eax) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%eax) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%al
|
||||
movb PTmap(%edx),%al
|
||||
andb $PG_V|PG_RW|PG_U,%al /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%al
|
||||
je 2f
|
||||
@ -741,7 +741,7 @@ ENTRY(generic_copyout)
|
||||
pushl %ecx
|
||||
shll $IDXSHIFT,%edx
|
||||
pushl %edx
|
||||
call _trapwrite /* trapwrite(addr) */
|
||||
call trapwrite /* trapwrite(addr) */
|
||||
popl %edx
|
||||
popl %ecx
|
||||
popl %edx
|
||||
@ -839,7 +839,7 @@ ENTRY(i586_copyout)
|
||||
jb slow_copyout
|
||||
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $4,%esp
|
||||
jmp done_copyout
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -849,7 +849,7 @@ ENTRY(i586_copyout)
|
||||
*/
|
||||
ENTRY(copyin)
|
||||
MEXITCOUNT
|
||||
jmp *_copyin_vector
|
||||
jmp *copyin_vector
|
||||
|
||||
ENTRY(generic_copyin)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -933,7 +933,7 @@ ENTRY(i586_copyin)
|
||||
|
||||
pushl %ebx /* XXX prepare for fastmove_fault */
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $8,%esp
|
||||
jmp done_copyin
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -1209,12 +1209,12 @@ ENTRY(suword)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1222,7 +1222,7 @@ ENTRY(suword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1258,9 +1258,9 @@ ENTRY(susword)
|
||||
leal _PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1268,7 +1268,7 @@ ENTRY(susword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1301,12 +1301,12 @@ ENTRY(subyte)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1314,7 +1314,7 @@ ENTRY(subyte)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1564,7 +1564,7 @@ ENTRY(rcr3)
|
||||
/* void load_cr3(caddr_t cr3) */
|
||||
ENTRY(load_cr3)
|
||||
#ifdef SWTCH_OPTIM_STATS
|
||||
incl _tlb_flush_count
|
||||
incl tlb_flush_count
|
||||
#endif
|
||||
movl 4(%esp),%eax
|
||||
movl %eax,%cr3
|
||||
|
@ -56,12 +56,12 @@
|
||||
|
||||
.data
|
||||
|
||||
.globl _panic
|
||||
.globl panic
|
||||
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
.globl _swtch_optim_stats, _tlb_flush_count
|
||||
_swtch_optim_stats: .long 0 /* number of _swtch_optims */
|
||||
_tlb_flush_count: .long 0
|
||||
.globl swtch_optim_stats, tlb_flush_count
|
||||
swtch_optim_stats: .long 0 /* number of _swtch_optims */
|
||||
tlb_flush_count: .long 0
|
||||
#endif
|
||||
|
||||
.text
|
||||
@ -129,7 +129,7 @@ ENTRY(cpu_switch)
|
||||
jne 1f
|
||||
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
|
||||
pushl %edx
|
||||
call _npxsave /* do it in a big C function */
|
||||
call npxsave /* do it in a big C function */
|
||||
popl %eax
|
||||
1:
|
||||
#endif /* DEV_NPX */
|
||||
@ -139,7 +139,7 @@ sw1:
|
||||
|
||||
#ifdef SMP
|
||||
/* Stop scheduling if smp_active goes zero and we are not BSP */
|
||||
cmpl $0,_smp_active
|
||||
cmpl $0,smp_active
|
||||
jne 1f
|
||||
cmpl $0,PCPU(CPUID)
|
||||
je 1f
|
||||
@ -154,7 +154,7 @@ sw1:
|
||||
* if it cannot find another process to run.
|
||||
*/
|
||||
sw1a:
|
||||
call _chooseproc /* trash ecx, edx, ret eax*/
|
||||
call chooseproc /* trash ecx, edx, ret eax*/
|
||||
|
||||
#ifdef INVARIANTS
|
||||
testl %eax,%eax /* no process? */
|
||||
@ -171,15 +171,15 @@ sw1b:
|
||||
movl P_ADDR(%ecx),%edx
|
||||
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
incl _swtch_optim_stats
|
||||
incl swtch_optim_stats
|
||||
#endif
|
||||
/* switch address space */
|
||||
movl %cr3,%ebx
|
||||
cmpl PCB_CR3(%edx),%ebx
|
||||
je 4f
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
decl _swtch_optim_stats
|
||||
incl _tlb_flush_count
|
||||
decl swtch_optim_stats
|
||||
incl tlb_flush_count
|
||||
#endif
|
||||
movl PCB_CR3(%edx),%ebx
|
||||
movl %ebx,%cr3
|
||||
@ -188,7 +188,7 @@ sw1b:
|
||||
movl PCPU(CPUID), %esi
|
||||
cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
|
||||
je 1f
|
||||
btsl %esi, _private_tss /* mark use of private tss */
|
||||
btsl %esi, private_tss /* mark use of private tss */
|
||||
movl PCB_EXT(%edx), %edi /* new tss descriptor */
|
||||
jmp 2f
|
||||
1:
|
||||
@ -198,7 +198,7 @@ sw1b:
|
||||
addl $(UPAGES * PAGE_SIZE - 16), %ebx
|
||||
movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0
|
||||
|
||||
btrl %esi, _private_tss
|
||||
btrl %esi, private_tss
|
||||
jae 3f
|
||||
PCPU_ADDR(COMMON_TSSD, %edi)
|
||||
2:
|
||||
@ -227,9 +227,9 @@ sw1b:
|
||||
#ifdef SMP
|
||||
#ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
|
||||
#ifdef CHEAP_TPR
|
||||
movl $0, _lapic+LA_TPR
|
||||
movl $0, lapic+LA_TPR
|
||||
#else
|
||||
andl $~APIC_TPR_PRIO, _lapic+LA_TPR
|
||||
andl $~APIC_TPR_PRIO, lapic+LA_TPR
|
||||
#endif /** CHEAP_TPR */
|
||||
#endif /** GRAB_LOPRIO */
|
||||
#endif /* SMP */
|
||||
@ -242,14 +242,14 @@ sw1b:
|
||||
|
||||
cmpl $0, PCB_USERLDT(%edx)
|
||||
jnz 1f
|
||||
movl __default_ldt,%eax
|
||||
movl _default_ldt,%eax
|
||||
cmpl PCPU(CURRENTLDT),%eax
|
||||
je 2f
|
||||
lldt __default_ldt
|
||||
lldt _default_ldt
|
||||
movl %eax,PCPU(CURRENTLDT)
|
||||
jmp 2f
|
||||
1: pushl %edx
|
||||
call _set_user_ldt
|
||||
call set_user_ldt
|
||||
popl %edx
|
||||
2:
|
||||
|
||||
@ -282,13 +282,13 @@ CROSSJUMPTARGET(sw1a)
|
||||
#ifdef INVARIANTS
|
||||
badsw2:
|
||||
pushl $sw0_2
|
||||
call _panic
|
||||
call panic
|
||||
|
||||
sw0_2: .asciz "cpu_switch: not SRUN"
|
||||
|
||||
badsw3:
|
||||
pushl $sw0_3
|
||||
call _panic
|
||||
call panic
|
||||
|
||||
sw0_3: .asciz "cpu_switch: chooseproc returned NULL"
|
||||
#endif
|
||||
@ -337,7 +337,7 @@ ENTRY(savectx)
|
||||
leal PCB_SAVEFPU(%eax),%eax
|
||||
pushl %eax
|
||||
pushl %eax
|
||||
call _npxsave
|
||||
call npxsave
|
||||
addl $4,%esp
|
||||
popl %eax
|
||||
popl %ecx
|
||||
@ -346,7 +346,7 @@ ENTRY(savectx)
|
||||
leal PCB_SAVEFPU(%ecx),%ecx
|
||||
pushl %ecx
|
||||
pushl %eax
|
||||
call _bcopy
|
||||
call bcopy
|
||||
addl $12,%esp
|
||||
#endif /* DEV_NPX */
|
||||
|
||||
|
@ -470,8 +470,8 @@ typedef struct IOAPIC ioapic_t;
|
||||
/*
|
||||
* Protects the IO APIC and apic_imen as a critical region.
|
||||
*/
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx)
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(imen_mtx)
|
||||
|
||||
#else /* SMP */
|
||||
|
||||
|
@ -37,7 +37,6 @@
|
||||
#define _MACHINE_ASMACROS_H_
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
#include <machine/asnames.h>
|
||||
|
||||
/* XXX too much duplication in various asm*.h's. */
|
||||
|
||||
|
@ -63,16 +63,16 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
|
||||
addl $4,%esp ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Slow, threaded interrupts.
|
||||
@ -99,9 +99,9 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
maybe_extra_ipending ; \
|
||||
movb _imen + IRQ_BYTE(irq_num),%al ; \
|
||||
movb imen + IRQ_BYTE(irq_num),%al ; \
|
||||
orb $IRQ_BIT(irq_num),%al ; \
|
||||
movb %al,_imen + IRQ_BYTE(irq_num) ; \
|
||||
movb %al,imen + IRQ_BYTE(irq_num) ; \
|
||||
outb %al,$icu+ICU_IMR_OFFSET ; \
|
||||
enable_icus ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
@ -110,13 +110,13 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
/* We could usually avoid the following jmp by inlining some of */ \
|
||||
/* _doreti, but it's probably better to use less cache. */ \
|
||||
jmp _doreti /* and catch up inside doreti */
|
||||
/* doreti, but it's probably better to use less cache. */ \
|
||||
jmp doreti /* and catch up inside doreti */
|
||||
|
||||
MCOUNT_LABEL(bintr)
|
||||
FAST_INTR(0,fastintr0, ENABLE_ICU1)
|
||||
|
@ -41,8 +41,8 @@
|
||||
ALIGN_DATA
|
||||
|
||||
/* interrupt mask enable (all h/w off) */
|
||||
.globl _imen
|
||||
_imen: .long HWI_MASK
|
||||
.globl imen
|
||||
imen: .long HWI_MASK
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
|
@ -41,8 +41,8 @@
|
||||
ALIGN_DATA
|
||||
|
||||
/* interrupt mask enable (all h/w off) */
|
||||
.globl _imen
|
||||
_imen: .long HWI_MASK
|
||||
.globl imen
|
||||
imen: .long HWI_MASK
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
|
@ -63,16 +63,16 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
|
||||
addl $4,%esp ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Slow, threaded interrupts.
|
||||
@ -99,9 +99,9 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
maybe_extra_ipending ; \
|
||||
movb _imen + IRQ_BYTE(irq_num),%al ; \
|
||||
movb imen + IRQ_BYTE(irq_num),%al ; \
|
||||
orb $IRQ_BIT(irq_num),%al ; \
|
||||
movb %al,_imen + IRQ_BYTE(irq_num) ; \
|
||||
movb %al,imen + IRQ_BYTE(irq_num) ; \
|
||||
outb %al,$icu+ICU_IMR_OFFSET ; \
|
||||
enable_icus ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
@ -110,13 +110,13 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
/* We could usually avoid the following jmp by inlining some of */ \
|
||||
/* _doreti, but it's probably better to use less cache. */ \
|
||||
jmp _doreti /* and catch up inside doreti */
|
||||
/* doreti, but it's probably better to use less cache. */ \
|
||||
jmp doreti /* and catch up inside doreti */
|
||||
|
||||
MCOUNT_LABEL(bintr)
|
||||
FAST_INTR(0,fastintr0, ENABLE_ICU1)
|
||||
|
@ -63,16 +63,16 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
|
||||
addl $4,%esp ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Slow, threaded interrupts.
|
||||
@ -99,9 +99,9 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
maybe_extra_ipending ; \
|
||||
movb _imen + IRQ_BYTE(irq_num),%al ; \
|
||||
movb imen + IRQ_BYTE(irq_num),%al ; \
|
||||
orb $IRQ_BIT(irq_num),%al ; \
|
||||
movb %al,_imen + IRQ_BYTE(irq_num) ; \
|
||||
movb %al,imen + IRQ_BYTE(irq_num) ; \
|
||||
outb %al,$icu+ICU_IMR_OFFSET ; \
|
||||
enable_icus ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
@ -110,13 +110,13 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
/* We could usually avoid the following jmp by inlining some of */ \
|
||||
/* _doreti, but it's probably better to use less cache. */ \
|
||||
jmp _doreti /* and catch up inside doreti */
|
||||
/* doreti, but it's probably better to use less cache. */ \
|
||||
jmp doreti /* and catch up inside doreti */
|
||||
|
||||
MCOUNT_LABEL(bintr)
|
||||
FAST_INTR(0,fastintr0, ENABLE_ICU1)
|
||||
|
@ -45,15 +45,15 @@
|
||||
*/
|
||||
#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
|
||||
|
||||
.globl _intrcnt, _eintrcnt
|
||||
_intrcnt:
|
||||
.globl intrcnt, eintrcnt
|
||||
intrcnt:
|
||||
.space NR_INTRNAMES * 4
|
||||
_eintrcnt:
|
||||
eintrcnt:
|
||||
|
||||
.globl _intrnames, _eintrnames
|
||||
_intrnames:
|
||||
.globl intrnames, eintrnames
|
||||
intrnames:
|
||||
.space NR_INTRNAMES * 16
|
||||
_eintrnames:
|
||||
eintrnames:
|
||||
.text
|
||||
|
||||
/*
|
||||
|
@ -45,15 +45,15 @@
|
||||
*/
|
||||
#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)
|
||||
|
||||
.globl _intrcnt, _eintrcnt
|
||||
_intrcnt:
|
||||
.globl intrcnt, eintrcnt
|
||||
intrcnt:
|
||||
.space NR_INTRNAMES * 4
|
||||
_eintrcnt:
|
||||
eintrcnt:
|
||||
|
||||
.globl _intrnames, _eintrnames
|
||||
_intrnames:
|
||||
.globl intrnames, eintrnames
|
||||
intrnames:
|
||||
.space NR_INTRNAMES * 16
|
||||
_eintrnames:
|
||||
eintrnames:
|
||||
.text
|
||||
|
||||
/*
|
||||
|
@ -64,7 +64,7 @@
|
||||
#include <machine/asmacros.h>
|
||||
#include <gnu/i386/fpemul/fpu_emu.h>
|
||||
|
||||
#define EXCEPTION _exception
|
||||
#define EXCEPTION exception
|
||||
|
||||
|
||||
#define PARAM1 8(%ebp)
|
||||
|
@ -89,7 +89,7 @@ ENTRY(reg_div)
|
||||
cmpl EXP_UNDER,EXP(%esi)
|
||||
jg xL_arg1_not_denormal
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -97,7 +97,7 @@ xL_arg1_not_denormal:
|
||||
cmpl EXP_UNDER,EXP(%ebx)
|
||||
jg xL_arg2_not_denormal
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -117,7 +117,7 @@ xL_arg2_not_denormal:
|
||||
addl EXP_BIAS,%edx
|
||||
movl %edx,EXP(%edi)
|
||||
|
||||
jmp _divide_kernel
|
||||
jmp divide_kernel
|
||||
|
||||
|
||||
/*-----------------------------------------------------------------------*/
|
||||
@ -134,14 +134,14 @@ L_arg2_NaN:
|
||||
pushl %edi /* Destination */
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
call _real_2op_NaN
|
||||
call real_2op_NaN
|
||||
jmp LDiv_exit
|
||||
|
||||
/* Invalid operations */
|
||||
L_zero_zero:
|
||||
L_inf_inf:
|
||||
pushl %edi /* Destination */
|
||||
call _arith_invalid /* 0/0 or Infinity/Infinity */
|
||||
call arith_invalid /* 0/0 or Infinity/Infinity */
|
||||
jmp LDiv_exit
|
||||
|
||||
L_no_NaN_arg:
|
||||
@ -168,7 +168,7 @@ L_inf_valid:
|
||||
cmpl EXP_UNDER,EXP(%ebx)
|
||||
jg L_copy_arg1 /* Answer is Inf */
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
#endif DENORM_OPERAND
|
||||
@ -193,7 +193,7 @@ L_arg1_not_inf:
|
||||
movb SIGN(%esi),%al
|
||||
xorb SIGN(%ebx),%al
|
||||
pushl %eax /* lower 8 bits have the sign */
|
||||
call _divide_by_zero
|
||||
call divide_by_zero
|
||||
jmp LDiv_exit
|
||||
|
||||
L_arg2_not_zero:
|
||||
@ -207,7 +207,7 @@ L_arg2_not_zero:
|
||||
cmpl EXP_UNDER,EXP(%esi)
|
||||
jg L_return_zero /* Answer is zero */
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
#endif DENORM_OPERAND
|
||||
@ -227,7 +227,7 @@ L_arg2_not_inf:
|
||||
cmpl EXP_UNDER,EXP(%ebx)
|
||||
jg L_copy_arg1 /* Answer is zero */
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
#endif DENORM_OPERAND
|
||||
|
@ -125,13 +125,13 @@ L_zero:
|
||||
|
||||
L_underflow:
|
||||
push %ebx
|
||||
call _arith_underflow
|
||||
call arith_underflow
|
||||
pop %ebx
|
||||
jmp L_exit
|
||||
|
||||
L_overflow:
|
||||
push %ebx
|
||||
call _arith_overflow
|
||||
call arith_overflow
|
||||
pop %ebx
|
||||
jmp L_exit
|
||||
|
||||
|
@ -443,7 +443,7 @@ FPU_Arith_exit:
|
||||
/* round-up.*/
|
||||
xL_precision_lost_up:
|
||||
push %eax
|
||||
call _set_precision_flag_up
|
||||
call set_precision_flag_up
|
||||
popl %eax
|
||||
jmp xL_no_precision_loss
|
||||
|
||||
@ -451,7 +451,7 @@ xL_precision_lost_up:
|
||||
/* truncation.*/
|
||||
xL_precision_lost_down:
|
||||
push %eax
|
||||
call _set_precision_flag_down
|
||||
call set_precision_flag_down
|
||||
popl %eax
|
||||
jmp xL_no_precision_loss
|
||||
|
||||
@ -588,7 +588,7 @@ LNormalise_shift_done:
|
||||
/* There must be a masked underflow*/
|
||||
push %eax
|
||||
pushl EX_Underflow
|
||||
call _exception
|
||||
call exception
|
||||
popl %eax
|
||||
popl %eax
|
||||
jmp xL_Normalised
|
||||
@ -598,12 +598,12 @@ LNormalise_shift_done:
|
||||
// Masked response.*/
|
||||
L_underflow_to_zero:
|
||||
push %eax
|
||||
call _set_precision_flag_down
|
||||
call set_precision_flag_down
|
||||
popl %eax
|
||||
|
||||
push %eax
|
||||
pushl EX_Underflow
|
||||
call _exception
|
||||
call exception
|
||||
popl %eax
|
||||
popl %eax
|
||||
|
||||
@ -614,7 +614,7 @@ L_underflow_to_zero:
|
||||
/* The operations resulted in a number too large to represent.*/
|
||||
L_overflow:
|
||||
push %edi
|
||||
call _arith_overflow
|
||||
call arith_overflow
|
||||
pop %edi
|
||||
jmp FPU_Arith_exit
|
||||
|
||||
|
@ -94,7 +94,7 @@ ENTRY(reg_u_add)
|
||||
cmpl EXP_UNDER,EXP(%esi)
|
||||
jg xOp1_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -102,7 +102,7 @@ xOp1_not_denorm:
|
||||
cmpl EXP_UNDER,EXP(%edi)
|
||||
jg xOp2_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
|
@ -102,7 +102,7 @@ ovfl_flag:
|
||||
|
||||
.text
|
||||
|
||||
.globl _divide_kernel
|
||||
.globl divide_kernel
|
||||
|
||||
ENTRY(reg_u_div)
|
||||
pushl %ebp
|
||||
@ -121,7 +121,7 @@ ENTRY(reg_u_div)
|
||||
cmpl EXP_UNDER,%eax
|
||||
jg xOp1_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -130,14 +130,14 @@ xOp1_not_denorm:
|
||||
cmpl EXP_UNDER,%eax
|
||||
jg xOp2_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
xOp2_not_denorm:
|
||||
#endif DENORM_OPERAND
|
||||
|
||||
_divide_kernel:
|
||||
divide_kernel:
|
||||
#ifdef PARANOID
|
||||
/* testl $0x80000000, SIGH(%esi) *//* Dividend */
|
||||
/* je L_bugged */
|
||||
|
@ -105,7 +105,7 @@ ENTRY(reg_u_mul)
|
||||
cmpl EXP_UNDER,%eax
|
||||
jg xOp1_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -114,7 +114,7 @@ xOp1_not_denorm:
|
||||
cmpl EXP_UNDER,%eax
|
||||
jg xOp2_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
|
@ -93,7 +93,7 @@ ENTRY(reg_u_sub)
|
||||
cmpl EXP_UNDER,EXP(%esi)
|
||||
jg xOp1_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -101,7 +101,7 @@ xOp1_not_denorm:
|
||||
cmpl EXP_UNDER,EXP(%edi)
|
||||
jg xOp2_not_denorm
|
||||
|
||||
call _denormal_operand
|
||||
call denormal_operand
|
||||
orl %eax,%eax
|
||||
jnz FPU_Arith_exit
|
||||
|
||||
@ -351,7 +351,7 @@ L_exit:
|
||||
|
||||
L_underflow:
|
||||
push %edi
|
||||
call _arith_underflow
|
||||
call arith_underflow
|
||||
pop %ebx
|
||||
jmp L_exit
|
||||
|
||||
|
@ -163,8 +163,8 @@ L_more_than_95:
|
||||
| part which has been shifted out of the arg. |
|
||||
| Results returned in the 64 bit arg and eax. |
|
||||
+---------------------------------------------------------------------------*/
|
||||
.globl _shrxs
|
||||
_shrxs:
|
||||
.globl shrxs
|
||||
shrxs:
|
||||
push %ebp
|
||||
movl %esp,%ebp
|
||||
pushl %esi
|
||||
|
@ -50,27 +50,27 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
addl $4, %esp ; \
|
||||
movl $0, _lapic+LA_EOI ; \
|
||||
movl $0, lapic+LA_EOI ; \
|
||||
lock ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4, %eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4, %eax ; \
|
||||
lock ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
|
||||
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
|
||||
|
||||
#define MASK_IRQ(irq_num) \
|
||||
IMASK_LOCK ; /* into critical reg */ \
|
||||
testl $IRQ_BIT(irq_num), _apic_imen ; \
|
||||
testl $IRQ_BIT(irq_num), apic_imen ; \
|
||||
jne 7f ; /* masked, don't mask */ \
|
||||
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
|
||||
orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
|
||||
movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
|
||||
movl REDIRIDX(irq_num), %eax ; /* get the index */ \
|
||||
movl %eax, (%ecx) ; /* write the index */ \
|
||||
@ -85,7 +85,7 @@ IDTVEC(vec_name) ; \
|
||||
* and the EOI cycle would cause redundant INTs to occur.
|
||||
*/
|
||||
#define MASK_LEVEL_IRQ(irq_num) \
|
||||
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
|
||||
testl $IRQ_BIT(irq_num), apic_pin_trigger ; \
|
||||
jz 9f ; /* edge, don't mask */ \
|
||||
MASK_IRQ(irq_num) ; \
|
||||
9:
|
||||
@ -93,18 +93,18 @@ IDTVEC(vec_name) ; \
|
||||
|
||||
#ifdef APIC_INTR_REORDER
|
||||
#define EOI_IRQ(irq_num) \
|
||||
movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
|
||||
movl apic_isrbit_location + 8 * (irq_num), %eax ; \
|
||||
movl (%eax), %eax ; \
|
||||
testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
|
||||
testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
|
||||
jz 9f ; /* not active */ \
|
||||
movl $0, _lapic+LA_EOI ; \
|
||||
movl $0, lapic+LA_EOI ; \
|
||||
9:
|
||||
|
||||
#else
|
||||
#define EOI_IRQ(irq_num) \
|
||||
testl $IRQ_BIT(irq_num), _lapic+LA_ISR1; \
|
||||
testl $IRQ_BIT(irq_num), lapic+LA_ISR1; \
|
||||
jz 9f ; /* not active */ \
|
||||
movl $0, _lapic+LA_EOI; \
|
||||
movl $0, lapic+LA_EOI; \
|
||||
9:
|
||||
#endif
|
||||
|
||||
@ -160,12 +160,12 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Handle "spurious INTerrupts".
|
||||
@ -176,8 +176,8 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xspuriousint
|
||||
_Xspuriousint:
|
||||
.globl Xspuriousint
|
||||
Xspuriousint:
|
||||
|
||||
/* No EOI cycle used here */
|
||||
|
||||
@ -189,8 +189,8 @@ _Xspuriousint:
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xinvltlb
|
||||
_Xinvltlb:
|
||||
.globl Xinvltlb
|
||||
Xinvltlb:
|
||||
pushl %eax
|
||||
|
||||
#ifdef COUNT_XINVLTLB_HITS
|
||||
@ -207,7 +207,7 @@ _Xinvltlb:
|
||||
movl %eax, %cr3
|
||||
|
||||
ss /* stack segment, avoid %ds load */
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
popl %eax
|
||||
iret
|
||||
@ -229,11 +229,11 @@ _Xinvltlb:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpucheckstate
|
||||
.globl _checkstate_cpustate
|
||||
.globl _checkstate_curproc
|
||||
.globl _checkstate_pc
|
||||
_Xcpucheckstate:
|
||||
.globl Xcpucheckstate
|
||||
.globl checkstate_cpustate
|
||||
.globl checkstate_curproc
|
||||
.globl checkstate_pc
|
||||
Xcpucheckstate:
|
||||
pushl %eax
|
||||
pushl %ebx
|
||||
pushl %ds /* save current data segment */
|
||||
@ -244,7 +244,7 @@ _Xcpucheckstate:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
movl $0, %ebx
|
||||
movl 20(%esp), %eax
|
||||
@ -256,15 +256,15 @@ _Xcpucheckstate:
|
||||
incl %ebx /* system or interrupt */
|
||||
1:
|
||||
movl PCPU(CPUID), %eax
|
||||
movl %ebx, _checkstate_cpustate(,%eax,4)
|
||||
movl %ebx, checkstate_cpustate(,%eax,4)
|
||||
movl PCPU(CURPROC), %ebx
|
||||
movl %ebx, _checkstate_curproc(,%eax,4)
|
||||
movl %ebx, checkstate_curproc(,%eax,4)
|
||||
|
||||
movl 16(%esp), %ebx
|
||||
movl %ebx, _checkstate_pc(,%eax,4)
|
||||
movl %ebx, checkstate_pc(,%eax,4)
|
||||
|
||||
lock /* checkstate_probed_cpus |= (1<<id) */
|
||||
btsl %eax, _checkstate_probed_cpus
|
||||
btsl %eax, checkstate_probed_cpus
|
||||
|
||||
popl %fs
|
||||
popl %ds /* restore previous data segment */
|
||||
@ -284,8 +284,8 @@ _Xcpucheckstate:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpuast
|
||||
_Xcpuast:
|
||||
.globl Xcpuast
|
||||
Xcpuast:
|
||||
PUSH_FRAME
|
||||
movl $KDSEL, %eax
|
||||
mov %ax, %ds /* use KERNEL data segment */
|
||||
@ -295,11 +295,11 @@ _Xcpuast:
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
lock /* checkstate_need_ast &= ~(1<<id) */
|
||||
btrl %eax, _checkstate_need_ast
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
btrl %eax, checkstate_need_ast
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
lock
|
||||
btsl %eax, _checkstate_pending_ast
|
||||
btsl %eax, checkstate_pending_ast
|
||||
jc 1f
|
||||
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
@ -310,7 +310,7 @@ _Xcpuast:
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
lock
|
||||
btrl %eax, _checkstate_pending_ast
|
||||
btrl %eax, checkstate_pending_ast
|
||||
lock
|
||||
btrl %eax, CNAME(resched_cpus)
|
||||
jnc 2f
|
||||
@ -322,7 +322,7 @@ _Xcpuast:
|
||||
lock
|
||||
incl CNAME(cpuast_cnt)
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
1:
|
||||
/* We are already in the process of delivering an ast for this CPU */
|
||||
POP_FRAME
|
||||
@ -338,8 +338,8 @@ _Xcpuast:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpustop
|
||||
_Xcpustop:
|
||||
.globl Xcpustop
|
||||
Xcpustop:
|
||||
pushl %ebp
|
||||
movl %esp, %ebp
|
||||
pushl %eax
|
||||
@ -353,7 +353,7 @@ _Xcpustop:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
imull $PCB_SIZE, %eax
|
||||
@ -366,15 +366,15 @@ _Xcpustop:
|
||||
movl PCPU(CPUID), %eax
|
||||
|
||||
lock
|
||||
btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
|
||||
btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
|
||||
1:
|
||||
btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
|
||||
btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
|
||||
jnc 1b
|
||||
|
||||
lock
|
||||
btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
|
||||
btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
|
||||
lock
|
||||
btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
|
||||
btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
|
||||
|
||||
test %eax, %eax
|
||||
jnz 2f
|
||||
@ -472,8 +472,8 @@ MCOUNT_LABEL(eintr)
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xrendezvous
|
||||
_Xrendezvous:
|
||||
.globl Xrendezvous
|
||||
Xrendezvous:
|
||||
PUSH_FRAME
|
||||
movl $KDSEL, %eax
|
||||
mov %ax, %ds /* use KERNEL data segment */
|
||||
@ -481,9 +481,9 @@ _Xrendezvous:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
call _smp_rendezvous_action
|
||||
call smp_rendezvous_action
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
POP_FRAME
|
||||
iret
|
||||
|
||||
@ -497,21 +497,21 @@ _xhits:
|
||||
#endif /* COUNT_XINVLTLB_HITS */
|
||||
|
||||
/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
|
||||
.globl _stopped_cpus, _started_cpus
|
||||
_stopped_cpus:
|
||||
.globl stopped_cpus, started_cpus
|
||||
stopped_cpus:
|
||||
.long 0
|
||||
_started_cpus:
|
||||
started_cpus:
|
||||
.long 0
|
||||
|
||||
#ifdef BETTER_CLOCK
|
||||
.globl _checkstate_probed_cpus
|
||||
_checkstate_probed_cpus:
|
||||
.globl checkstate_probed_cpus
|
||||
checkstate_probed_cpus:
|
||||
.long 0
|
||||
#endif /* BETTER_CLOCK */
|
||||
.globl _checkstate_need_ast
|
||||
_checkstate_need_ast:
|
||||
.globl checkstate_need_ast
|
||||
checkstate_need_ast:
|
||||
.long 0
|
||||
_checkstate_pending_ast:
|
||||
checkstate_pending_ast:
|
||||
.long 0
|
||||
.globl CNAME(resched_cpus)
|
||||
.globl CNAME(want_resched_cnt)
|
||||
@ -526,8 +526,8 @@ CNAME(cpuast_cnt):
|
||||
CNAME(cpustop_restartfunc):
|
||||
.long 0
|
||||
|
||||
.globl _apic_pin_trigger
|
||||
_apic_pin_trigger:
|
||||
.globl apic_pin_trigger
|
||||
apic_pin_trigger:
|
||||
.long 0
|
||||
|
||||
.text
|
||||
|
@ -49,9 +49,9 @@ bioscall_stack: .long 0
|
||||
ENTRY(bios32)
|
||||
pushl %ebp
|
||||
movl 16(%esp),%ebp
|
||||
mov %bp,_bioscall_vector+4
|
||||
mov %bp,bioscall_vector+4
|
||||
movl 12(%esp),%ebp
|
||||
movl %ebp,_bioscall_vector
|
||||
movl %ebp,bioscall_vector
|
||||
movl 8(%esp),%ebp
|
||||
pushl %ebx
|
||||
pushl %esi
|
||||
@ -63,7 +63,7 @@ ENTRY(bios32)
|
||||
movl 16(%ebp),%esi
|
||||
movl 20(%ebp),%edi
|
||||
pushl %ebp
|
||||
lcall *_bioscall_vector
|
||||
lcall *bioscall_vector
|
||||
popl %ebp
|
||||
movl %eax,0(%ebp)
|
||||
movl %ebx,4(%ebp)
|
||||
@ -127,7 +127,7 @@ ENTRY(bios16_call)
|
||||
lret /* ...continue below */
|
||||
.globl CNAME(bios16_jmp)
|
||||
CNAME(bios16_jmp):
|
||||
lcallw *_bioscall_vector /* 16-bit call */
|
||||
lcallw *bioscall_vector /* 16-bit call */
|
||||
|
||||
jc 1f
|
||||
pushl $0 /* success */
|
||||
|
@ -81,18 +81,18 @@
|
||||
* On entry to a trap or interrupt WE DO NOT OWN THE MP LOCK. This means
|
||||
* that we must be careful in regards to accessing global variables. We
|
||||
* save (push) the current cpl (our software interrupt disable mask), call
|
||||
* the trap function, then call _doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). _doreti will determine if the restoration
|
||||
* the trap function, then call doreti to restore the cpl and deal with
|
||||
* ASTs (software interrupts). doreti will determine if the restoration
|
||||
* of the cpl unmasked any pending interrupts and will issue those interrupts
|
||||
* synchronously prior to doing the iret.
|
||||
*
|
||||
* At the moment we must own the MP lock to do any cpl manipulation, which
|
||||
* means we must own it prior to calling _doreti. The syscall case attempts
|
||||
* means we must own it prior to calling doreti. The syscall case attempts
|
||||
* to avoid this by handling a reduced set of cases itself and iret'ing.
|
||||
*/
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(_X,name); \
|
||||
.type __CONCAT(_X,name),@function; __CONCAT(_X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp _alltraps
|
||||
#define IDTVEC(name) ALIGN_TEXT; .globl __CONCAT(X,name); \
|
||||
.type __CONCAT(X,name),@function; __CONCAT(X,name):
|
||||
#define TRAP(a) pushl $(a) ; jmp alltraps
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
#define BDBTRAP(name) \
|
||||
@ -171,14 +171,14 @@ IDTVEC(fpu)
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
|
||||
MPLOCKED incl _cnt+V_TRAP
|
||||
MPLOCKED incl cnt+V_TRAP
|
||||
pushl $0 /* dummy unit to finish intr frame */
|
||||
|
||||
call _npx_intr
|
||||
call npx_intr
|
||||
|
||||
addl $4,%esp
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
#else /* DEV_NPX */
|
||||
pushl $0; TRAP(T_ARITHTRAP)
|
||||
#endif /* DEV_NPX */
|
||||
@ -187,16 +187,16 @@ IDTVEC(align)
|
||||
TRAP(T_ALIGNFLT)
|
||||
|
||||
/*
|
||||
* _alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* alltraps entry point. Interrupts are enabled if this was a trap
|
||||
* gate (TGT), else disabled if this was an interrupt gate (IGT).
|
||||
* Note that int0x80_syscall is a trap gate. Only page faults
|
||||
* use an interrupt gate.
|
||||
*/
|
||||
|
||||
SUPERALIGN_TEXT
|
||||
.globl _alltraps
|
||||
.type _alltraps,@function
|
||||
_alltraps:
|
||||
.globl alltraps
|
||||
.type alltraps,@function
|
||||
alltraps:
|
||||
pushal
|
||||
pushl %ds
|
||||
pushl %es
|
||||
@ -209,14 +209,14 @@ alltraps_with_regs_pushed:
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
calltrap:
|
||||
FAKE_MCOUNT(_btrap) /* init "from" _btrap -> calltrap */
|
||||
call _trap
|
||||
FAKE_MCOUNT(btrap) /* init "from" btrap -> calltrap */
|
||||
call trap
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* SYSCALL CALL GATE (old entry point for a.out binaries)
|
||||
@ -265,27 +265,27 @@ syscall_with_err_pushed:
|
||||
mov $KPSEL,%ax
|
||||
mov %ax,%fs
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
call _syscall
|
||||
call syscall
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
ENTRY(fork_trampoline)
|
||||
pushl %esp /* trapframe pointer */
|
||||
pushl %ebx /* arg1 */
|
||||
pushl %esi /* function */
|
||||
call _fork_exit
|
||||
call fork_exit
|
||||
addl $12,%esp
|
||||
/* cut from syscall */
|
||||
|
||||
/*
|
||||
* Return via _doreti to handle ASTs.
|
||||
* Return via doreti to handle ASTs.
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
|
||||
/*
|
||||
* Include vm86 call routines, which want to call _doreti.
|
||||
* Include vm86 call routines, which want to call doreti.
|
||||
*/
|
||||
#include "i386/i386/vm86bios.s"
|
||||
|
||||
|
@ -69,19 +69,19 @@
|
||||
* PTmap is recursive pagemap at top of virtual address space.
|
||||
* Within PTmap, the page directory can be found (third indirection).
|
||||
*/
|
||||
.globl _PTmap,_PTD,_PTDpde
|
||||
.set _PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set _PTD,_PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set _PTDpde,_PTD + (PTDPTDI * PDESIZE)
|
||||
.globl PTmap,PTD,PTDpde
|
||||
.set PTmap,(PTDPTDI << PDRSHIFT)
|
||||
.set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
|
||||
.set PTDpde,PTD + (PTDPTDI * PDESIZE)
|
||||
|
||||
/*
|
||||
* APTmap, APTD is the alternate recursive pagemap.
|
||||
* It's used when modifying another process's page tables.
|
||||
*/
|
||||
.globl _APTmap,_APTD,_APTDpde
|
||||
.set _APTmap,APTDPTDI << PDRSHIFT
|
||||
.set _APTD,_APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set _APTDpde,_PTD + (APTDPTDI * PDESIZE)
|
||||
.globl APTmap,APTD,APTDpde
|
||||
.set APTmap,APTDPTDI << PDRSHIFT
|
||||
.set APTD,APTmap + (APTDPTDI * PAGE_SIZE)
|
||||
.set APTDpde,PTD + (APTDPTDI * PDESIZE)
|
||||
|
||||
#ifdef SMP
|
||||
/*
|
||||
@ -89,9 +89,9 @@
|
||||
* This is "constructed" in locore.s on the BSP and in mp_machdep.c
|
||||
* for each AP. DO NOT REORDER THESE WITHOUT UPDATING THE REST!
|
||||
*/
|
||||
.globl _SMP_prvspace, _lapic
|
||||
.set _SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set _lapic,_SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
.globl SMP_prvspace, lapic
|
||||
.set SMP_prvspace,(MPPTDI << PDRSHIFT)
|
||||
.set lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
|
||||
#endif /* SMP */
|
||||
|
||||
/*
|
||||
@ -104,48 +104,48 @@
|
||||
.space 0x2000 /* space for tmpstk - temporary stack */
|
||||
HIDENAME(tmpstk):
|
||||
|
||||
.globl _boothowto,_bootdev
|
||||
.globl boothowto,bootdev
|
||||
|
||||
.globl _cpu,_cpu_vendor,_cpu_id,_bootinfo
|
||||
.globl _cpu_high, _cpu_feature
|
||||
.globl cpu,cpu_vendor,cpu_id,bootinfo
|
||||
.globl cpu_high, cpu_feature
|
||||
|
||||
_cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
_cpu_id: .long 0 /* stepping ID */
|
||||
_cpu_high: .long 0 /* highest arg to CPUID */
|
||||
_cpu_feature: .long 0 /* features */
|
||||
_cpu_vendor: .space 20 /* CPU origin code */
|
||||
_bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
cpu: .long 0 /* are we 386, 386sx, or 486 */
|
||||
cpu_id: .long 0 /* stepping ID */
|
||||
cpu_high: .long 0 /* highest arg to CPUID */
|
||||
cpu_feature: .long 0 /* features */
|
||||
cpu_vendor: .space 20 /* CPU origin code */
|
||||
bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
|
||||
|
||||
_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
KERNend: .long 0 /* phys addr end of kernel (just after bss) */
|
||||
physfree: .long 0 /* phys addr of next free page */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _cpu0prvpage
|
||||
.globl cpu0prvpage
|
||||
cpu0pp: .long 0 /* phys addr cpu0 private pg */
|
||||
_cpu0prvpage: .long 0 /* relocated version */
|
||||
cpu0prvpage: .long 0 /* relocated version */
|
||||
|
||||
.globl _SMPpt
|
||||
.globl SMPpt
|
||||
SMPptpa: .long 0 /* phys addr SMP page table */
|
||||
_SMPpt: .long 0 /* relocated version */
|
||||
SMPpt: .long 0 /* relocated version */
|
||||
#endif /* SMP */
|
||||
|
||||
.globl _IdlePTD
|
||||
_IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
.globl IdlePTD
|
||||
IdlePTD: .long 0 /* phys addr of kernel PTD */
|
||||
|
||||
#ifdef SMP
|
||||
.globl _KPTphys
|
||||
.globl KPTphys
|
||||
#endif
|
||||
_KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
KPTphys: .long 0 /* phys addr of kernel page tables */
|
||||
|
||||
.globl _proc0paddr
|
||||
_proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
.globl proc0paddr
|
||||
proc0paddr: .long 0 /* address of proc 0 address space */
|
||||
p0upa: .long 0 /* phys addr of proc0's UPAGES */
|
||||
|
||||
vm86phystk: .long 0 /* PA of vm86/bios stack */
|
||||
|
||||
.globl _vm86paddr, _vm86pa
|
||||
_vm86paddr: .long 0 /* address of vm86 region */
|
||||
_vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
.globl vm86paddr, vm86pa
|
||||
vm86paddr: .long 0 /* address of vm86 region */
|
||||
vm86pa: .long 0 /* phys addr of vm86 region */
|
||||
|
||||
#ifdef BDE_DEBUGGER
|
||||
.globl _bdb_exists /* flag to indicate BDE debugger is present */
|
||||
@ -153,8 +153,8 @@ _bdb_exists: .long 0
|
||||
#endif
|
||||
|
||||
#ifdef PC98
|
||||
.globl _pc98_system_parameter
|
||||
_pc98_system_parameter:
|
||||
.globl pc98_system_parameter
|
||||
pc98_system_parameter:
|
||||
.space 0x240
|
||||
#endif
|
||||
|
||||
@ -205,7 +205,7 @@ _pc98_system_parameter:
|
||||
#define fillkptphys(prot) \
|
||||
movl %eax, %ebx ; \
|
||||
shrl $PAGE_SHIFT, %ebx ; \
|
||||
fillkpt(R(_KPTphys), prot)
|
||||
fillkpt(R(KPTphys), prot)
|
||||
|
||||
.text
|
||||
/**********************************************************************
|
||||
@ -218,7 +218,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#ifdef PC98
|
||||
/* save SYSTEM PARAMETER for resume (NS/T or other) */
|
||||
movl $0xa1400,%esi
|
||||
movl $R(_pc98_system_parameter),%edi
|
||||
movl $R(pc98_system_parameter),%edi
|
||||
movl $0x0240,%ecx
|
||||
cld
|
||||
rep
|
||||
@ -266,10 +266,10 @@ NON_GPROF_ENTRY(btext)
|
||||
|
||||
#ifdef PC98
|
||||
/* pc98_machine_type & M_EPSON_PC98 */
|
||||
testb $0x02,R(_pc98_system_parameter)+220
|
||||
testb $0x02,R(pc98_system_parameter)+220
|
||||
jz 3f
|
||||
/* epson_machine_id <= 0x0b */
|
||||
cmpb $0x0b,R(_pc98_system_parameter)+224
|
||||
cmpb $0x0b,R(pc98_system_parameter)+224
|
||||
ja 3f
|
||||
|
||||
/* count up memory */
|
||||
@ -284,11 +284,11 @@ NON_GPROF_ENTRY(btext)
|
||||
loop 1b
|
||||
2: subl $0x100000,%eax
|
||||
shrl $17,%eax
|
||||
movb %al,R(_pc98_system_parameter)+1
|
||||
movb %al,R(pc98_system_parameter)+1
|
||||
3:
|
||||
|
||||
movw R(_pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(_cpu_id)
|
||||
movw R(pc98_system_parameter+0x86),%ax
|
||||
movw %ax,R(cpu_id)
|
||||
#endif
|
||||
|
||||
call identify_cpu
|
||||
@ -309,8 +309,8 @@ NON_GPROF_ENTRY(btext)
|
||||
* are above 1MB to keep the gdt and idt away from the bss and page
|
||||
* tables. The idt is only used if BDE_DEBUGGER is enabled.
|
||||
*/
|
||||
movl $R(_end),%ecx
|
||||
movl $R(_edata),%edi
|
||||
movl $R(end),%ecx
|
||||
movl $R(edata),%edi
|
||||
subl %edi,%ecx
|
||||
xorl %eax,%eax
|
||||
cld
|
||||
@ -322,7 +322,7 @@ NON_GPROF_ENTRY(btext)
|
||||
/*
|
||||
* If the CPU has support for VME, turn it on.
|
||||
*/
|
||||
testl $CPUID_VME, R(_cpu_feature)
|
||||
testl $CPUID_VME, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_VME, %eax
|
||||
@ -338,7 +338,7 @@ NON_GPROF_ENTRY(btext)
|
||||
#endif
|
||||
|
||||
/* Now enable paging */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl %eax,%cr3 /* load ptd addr into mmu */
|
||||
movl %cr0,%eax /* get control word */
|
||||
orl $CR0_PE|CR0_PG,%eax /* enable paging */
|
||||
@ -359,16 +359,16 @@ NON_GPROF_ENTRY(btext)
|
||||
/* now running relocated at KERNBASE where the system is linked to run */
|
||||
begin:
|
||||
/* set up bootstrap stack */
|
||||
movl _proc0paddr,%eax /* location of in-kernel pages */
|
||||
movl proc0paddr,%eax /* location of in-kernel pages */
|
||||
leal UPAGES*PAGE_SIZE(%eax),%esp /* bootstrap stack end location */
|
||||
|
||||
xorl %ebp,%ebp /* mark end of frames */
|
||||
|
||||
movl _IdlePTD,%esi
|
||||
movl IdlePTD,%esi
|
||||
movl %esi,PCB_CR3(%eax)
|
||||
|
||||
pushl physfree /* value of first for init386(first) */
|
||||
call _init386 /* wire 386 chip for unix operation */
|
||||
call init386 /* wire 386 chip for unix operation */
|
||||
|
||||
/*
|
||||
* Clean up the stack in a way that db_numargs() understands, so
|
||||
@ -377,7 +377,7 @@ begin:
|
||||
*/
|
||||
addl $4,%esp
|
||||
|
||||
call _mi_startup /* autoconfiguration, mountroot etc */
|
||||
call mi_startup /* autoconfiguration, mountroot etc */
|
||||
/* NOTREACHED */
|
||||
addl $0,%esp /* for db_numargs() again */
|
||||
|
||||
@ -398,7 +398,7 @@ NON_GPROF_ENTRY(sigcode)
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_osigcode:
|
||||
osigcode:
|
||||
call *SIGF_HANDLER(%esp) /* call signal handler */
|
||||
lea SIGF_SC(%esp),%eax /* get sigcontext */
|
||||
pushl %eax
|
||||
@ -413,14 +413,14 @@ _osigcode:
|
||||
0: jmp 0b
|
||||
|
||||
ALIGN_TEXT
|
||||
_esigcode:
|
||||
esigcode:
|
||||
|
||||
.data
|
||||
.globl _szsigcode, _szosigcode
|
||||
_szsigcode:
|
||||
.long _esigcode-_sigcode
|
||||
_szosigcode:
|
||||
.long _esigcode-_osigcode
|
||||
.globl szsigcode, szosigcode
|
||||
szsigcode:
|
||||
.long esigcode-sigcode
|
||||
szosigcode:
|
||||
.long esigcode-osigcode
|
||||
.text
|
||||
|
||||
/**********************************************************************
|
||||
@ -507,7 +507,7 @@ newboot:
|
||||
cmpl $0,%esi
|
||||
je 2f /* No kernelname */
|
||||
movl $MAXPATHLEN,%ecx /* Brute force!!! */
|
||||
movl $R(_kernelname),%edi
|
||||
movl $R(kernelname),%edi
|
||||
cmpb $'/',(%esi) /* Make sure it starts with a slash */
|
||||
je 1f
|
||||
movb $'/',(%edi)
|
||||
@ -535,7 +535,7 @@ got_bi_size:
|
||||
* Copy the common part of the bootinfo struct
|
||||
*/
|
||||
movl %ebx,%esi
|
||||
movl $R(_bootinfo),%edi
|
||||
movl $R(bootinfo),%edi
|
||||
cmpl $BOOTINFO_SIZE,%ecx
|
||||
jbe got_common_bi_size
|
||||
movl $BOOTINFO_SIZE,%ecx
|
||||
@ -552,12 +552,12 @@ got_common_bi_size:
|
||||
movl BI_NFS_DISKLESS(%ebx),%esi
|
||||
cmpl $0,%esi
|
||||
je olddiskboot
|
||||
movl $R(_nfs_diskless),%edi
|
||||
movl $R(nfs_diskless),%edi
|
||||
movl $NFSDISKLESS_SIZE,%ecx
|
||||
cld
|
||||
rep
|
||||
movsb
|
||||
movl $R(_nfs_diskless_valid),%edi
|
||||
movl $R(nfs_diskless_valid),%edi
|
||||
movl $1,(%edi)
|
||||
#endif
|
||||
#endif
|
||||
@ -570,9 +570,9 @@ got_common_bi_size:
|
||||
*/
|
||||
olddiskboot:
|
||||
movl 8(%ebp),%eax
|
||||
movl %eax,R(_boothowto)
|
||||
movl %eax,R(boothowto)
|
||||
movl 12(%ebp),%eax
|
||||
movl %eax,R(_bootdev)
|
||||
movl %eax,R(bootdev)
|
||||
|
||||
ret
|
||||
|
||||
@ -610,16 +610,16 @@ identify_cpu:
|
||||
divl %ecx
|
||||
jz trynexgen
|
||||
popfl
|
||||
movl $CPU_386,R(_cpu)
|
||||
movl $CPU_386,R(cpu)
|
||||
jmp 3f
|
||||
|
||||
trynexgen:
|
||||
popfl
|
||||
movl $CPU_NX586,R(_cpu)
|
||||
movl $0x4778654e,R(_cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(_cpu_vendor+4)
|
||||
movl $0x6e657669,R(_cpu_vendor+8)
|
||||
movl $0,R(_cpu_vendor+12)
|
||||
movl $CPU_NX586,R(cpu)
|
||||
movl $0x4778654e,R(cpu_vendor) # store vendor string
|
||||
movl $0x72446e65,R(cpu_vendor+4)
|
||||
movl $0x6e657669,R(cpu_vendor+8)
|
||||
movl $0,R(cpu_vendor+12)
|
||||
jmp 3f
|
||||
|
||||
try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
@ -638,7 +638,7 @@ try486: /* Try to toggle identification flag; does not exist on early 486s. */
|
||||
|
||||
testl %eax,%eax
|
||||
jnz trycpuid
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
|
||||
/*
|
||||
* Check Cyrix CPU
|
||||
@ -665,41 +665,41 @@ trycyrix:
|
||||
* CPU, we couldn't distinguish it from Cyrix's (including IBM
|
||||
* brand of Cyrix CPUs).
|
||||
*/
|
||||
movl $0x69727943,R(_cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(_cpu_vendor+4)
|
||||
movl $0x64616574,R(_cpu_vendor+8)
|
||||
movl $0x69727943,R(cpu_vendor) # store vendor string
|
||||
movl $0x736e4978,R(cpu_vendor+4)
|
||||
movl $0x64616574,R(cpu_vendor+8)
|
||||
jmp 3f
|
||||
|
||||
trycpuid: /* Use the `cpuid' instruction. */
|
||||
xorl %eax,%eax
|
||||
cpuid # cpuid 0
|
||||
movl %eax,R(_cpu_high) # highest capability
|
||||
movl %ebx,R(_cpu_vendor) # store vendor string
|
||||
movl %edx,R(_cpu_vendor+4)
|
||||
movl %ecx,R(_cpu_vendor+8)
|
||||
movb $0,R(_cpu_vendor+12)
|
||||
movl %eax,R(cpu_high) # highest capability
|
||||
movl %ebx,R(cpu_vendor) # store vendor string
|
||||
movl %edx,R(cpu_vendor+4)
|
||||
movl %ecx,R(cpu_vendor+8)
|
||||
movb $0,R(cpu_vendor+12)
|
||||
|
||||
movl $1,%eax
|
||||
cpuid # cpuid 1
|
||||
movl %eax,R(_cpu_id) # store cpu_id
|
||||
movl %edx,R(_cpu_feature) # store cpu_feature
|
||||
movl %eax,R(cpu_id) # store cpu_id
|
||||
movl %edx,R(cpu_feature) # store cpu_feature
|
||||
rorl $8,%eax # extract family type
|
||||
andl $15,%eax
|
||||
cmpl $5,%eax
|
||||
jae 1f
|
||||
|
||||
/* less than Pentium; must be 486 */
|
||||
movl $CPU_486,R(_cpu)
|
||||
movl $CPU_486,R(cpu)
|
||||
jmp 3f
|
||||
1:
|
||||
/* a Pentium? */
|
||||
cmpl $5,%eax
|
||||
jne 2f
|
||||
movl $CPU_586,R(_cpu)
|
||||
movl $CPU_586,R(cpu)
|
||||
jmp 3f
|
||||
2:
|
||||
/* Greater than Pentium...call it a Pentium Pro */
|
||||
movl $CPU_686,R(_cpu)
|
||||
movl $CPU_686,R(cpu)
|
||||
3:
|
||||
ret
|
||||
|
||||
@ -712,7 +712,7 @@ trycpuid: /* Use the `cpuid' instruction. */
|
||||
|
||||
create_pagetables:
|
||||
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_PGE, %eax
|
||||
@ -723,17 +723,17 @@ create_pagetables:
|
||||
movl $R(_end),%esi
|
||||
|
||||
/* Include symbols, if any. */
|
||||
movl R(_bootinfo+BI_ESYMTAB),%edi
|
||||
movl R(bootinfo+BI_ESYMTAB),%edi
|
||||
testl %edi,%edi
|
||||
je over_symalloc
|
||||
movl %edi,%esi
|
||||
movl $KERNBASE,%edi
|
||||
addl %edi,R(_bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(_bootinfo+BI_ESYMTAB)
|
||||
addl %edi,R(bootinfo+BI_SYMTAB)
|
||||
addl %edi,R(bootinfo+BI_ESYMTAB)
|
||||
over_symalloc:
|
||||
|
||||
/* If we are told where the end of the kernel space is, believe it. */
|
||||
movl R(_bootinfo+BI_KERNEND),%edi
|
||||
movl R(bootinfo+BI_KERNEND),%edi
|
||||
testl %edi,%edi
|
||||
je no_kernend
|
||||
movl %edi,%esi
|
||||
@ -741,43 +741,43 @@ no_kernend:
|
||||
|
||||
addl $PAGE_MASK,%esi
|
||||
andl $~PAGE_MASK,%esi
|
||||
movl %esi,R(_KERNend) /* save end of kernel */
|
||||
movl %esi,R(KERNend) /* save end of kernel */
|
||||
movl %esi,R(physfree) /* next free page is at end of kernel */
|
||||
|
||||
/* Allocate Kernel Page Tables */
|
||||
ALLOCPAGES(NKPT)
|
||||
movl %esi,R(_KPTphys)
|
||||
movl %esi,R(KPTphys)
|
||||
|
||||
/* Allocate Page Table Directory */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(_IdlePTD)
|
||||
movl %esi,R(IdlePTD)
|
||||
|
||||
/* Allocate UPAGES */
|
||||
ALLOCPAGES(UPAGES)
|
||||
movl %esi,R(p0upa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_proc0paddr)
|
||||
movl %esi, R(proc0paddr)
|
||||
|
||||
ALLOCPAGES(1) /* vm86/bios stack */
|
||||
movl %esi,R(vm86phystk)
|
||||
|
||||
ALLOCPAGES(3) /* pgtable + ext + IOPAGES */
|
||||
movl %esi,R(_vm86pa)
|
||||
movl %esi,R(vm86pa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_vm86paddr)
|
||||
movl %esi, R(vm86paddr)
|
||||
|
||||
#ifdef SMP
|
||||
/* Allocate cpu0's private data page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(cpu0pp)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_cpu0prvpage) /* relocated to KVM space */
|
||||
movl %esi, R(cpu0prvpage) /* relocated to KVM space */
|
||||
|
||||
/* Allocate SMP page table page */
|
||||
ALLOCPAGES(1)
|
||||
movl %esi,R(SMPptpa)
|
||||
addl $KERNBASE, %esi
|
||||
movl %esi, R(_SMPpt) /* relocated to KVM space */
|
||||
movl %esi, R(SMPpt) /* relocated to KVM space */
|
||||
#endif /* SMP */
|
||||
|
||||
/* Map read-only from zero to the end of the kernel text section */
|
||||
@ -790,35 +790,35 @@ no_kernend:
|
||||
xorl %edx,%edx
|
||||
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 2f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
2: movl $R(_etext),%ecx
|
||||
2: movl $R(etext),%ecx
|
||||
addl $PAGE_MASK,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map read-write, data, bss and symbols */
|
||||
movl $R(_etext),%eax
|
||||
movl $R(etext),%eax
|
||||
addl $PAGE_MASK, %eax
|
||||
andl $~PAGE_MASK, %eax
|
||||
map_read_write:
|
||||
movl $PG_RW,%edx
|
||||
#if !defined(SMP)
|
||||
testl $CPUID_PGE, R(_cpu_feature)
|
||||
testl $CPUID_PGE, R(cpu_feature)
|
||||
jz 1f
|
||||
orl $PG_G,%edx
|
||||
#endif
|
||||
|
||||
1: movl R(_KERNend),%ecx
|
||||
1: movl R(KERNend),%ecx
|
||||
subl %eax,%ecx
|
||||
shrl $PAGE_SHIFT,%ecx
|
||||
fillkptphys(%edx)
|
||||
|
||||
/* Map page directory. */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $1, %ecx
|
||||
fillkptphys($PG_RW)
|
||||
|
||||
@ -841,13 +841,13 @@ map_read_write:
|
||||
movl $0, %eax
|
||||
movl $0, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
/* ...likewise for the ISA hole */
|
||||
movl $ISA_HOLE_START, %eax
|
||||
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
|
||||
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
|
||||
fillkpt(R(_vm86pa), $PG_RW|PG_U)
|
||||
fillkpt(R(vm86pa), $PG_RW|PG_U)
|
||||
|
||||
#ifdef SMP
|
||||
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
|
||||
@ -870,7 +870,7 @@ map_read_write:
|
||||
movl R(SMPptpa), %eax
|
||||
movl $MPPTDI, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* Fakeup VA for the local apic to allow early traps. */
|
||||
ALLOCPAGES(1)
|
||||
@ -881,22 +881,22 @@ map_read_write:
|
||||
#endif /* SMP */
|
||||
|
||||
/* install a pde for temporary double map of bottom of VA */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
xorl %ebx, %ebx
|
||||
movl $1, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install pde's for pt's */
|
||||
movl R(_KPTphys), %eax
|
||||
movl R(KPTphys), %eax
|
||||
movl $KPTDI, %ebx
|
||||
movl $NKPT, %ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
/* install a pde recursively mapping page directory as a page table */
|
||||
movl R(_IdlePTD), %eax
|
||||
movl R(IdlePTD), %eax
|
||||
movl $PTDPTDI, %ebx
|
||||
movl $1,%ecx
|
||||
fillkpt(R(_IdlePTD), $PG_RW)
|
||||
fillkpt(R(IdlePTD), $PG_RW)
|
||||
|
||||
ret
|
||||
|
||||
@ -957,7 +957,7 @@ bdb_commit_paging:
|
||||
cmpl $0,_bdb_exists
|
||||
je bdb_commit_paging_exit
|
||||
|
||||
movl $_gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $gdt+8*9,%eax /* adjust slots 9-17 */
|
||||
movl $9,%ecx
|
||||
reloc_gdt:
|
||||
movb $KERNBASE>>24,7(%eax) /* top byte of base addresses, was 0, */
|
||||
|
@ -74,12 +74,12 @@
|
||||
NON_GPROF_ENTRY(MPentry)
|
||||
CHECKPOINT(0x36, 3)
|
||||
/* Now enable paging mode */
|
||||
movl _IdlePTD-KERNBASE, %eax
|
||||
movl IdlePTD-KERNBASE, %eax
|
||||
movl %eax,%cr3
|
||||
movl %cr0,%eax
|
||||
orl $CR0_PE|CR0_PG,%eax /* enable paging */
|
||||
movl %eax,%cr0 /* let the games begin! */
|
||||
movl _bootSTK,%esp /* boot stack end loc. */
|
||||
movl bootSTK,%esp /* boot stack end loc. */
|
||||
|
||||
pushl $mp_begin /* jump to high mem */
|
||||
ret
|
||||
@ -89,13 +89,13 @@ NON_GPROF_ENTRY(MPentry)
|
||||
*/
|
||||
mp_begin: /* now running relocated at KERNBASE */
|
||||
CHECKPOINT(0x37, 4)
|
||||
call _init_secondary /* load i386 tables */
|
||||
call init_secondary /* load i386 tables */
|
||||
CHECKPOINT(0x38, 5)
|
||||
|
||||
/*
|
||||
* If the [BSP] CPU has support for VME, turn it on.
|
||||
*/
|
||||
testl $CPUID_VME, _cpu_feature /* XXX WRONG! BSP! */
|
||||
testl $CPUID_VME, cpu_feature /* XXX WRONG! BSP! */
|
||||
jz 1f
|
||||
movl %cr4, %eax
|
||||
orl $CR4_VME, %eax
|
||||
@ -103,19 +103,19 @@ mp_begin: /* now running relocated at KERNBASE */
|
||||
1:
|
||||
|
||||
/* disable the APIC, just to be SURE */
|
||||
movl _lapic+LA_SVR, %eax /* get spurious vector reg. */
|
||||
movl lapic+LA_SVR, %eax /* get spurious vector reg. */
|
||||
andl $~APIC_SVR_SWEN, %eax /* clear software enable bit */
|
||||
movl %eax, _lapic+LA_SVR
|
||||
movl %eax, lapic+LA_SVR
|
||||
|
||||
/* signal our startup to the BSP */
|
||||
movl _lapic+LA_VER, %eax /* our version reg contents */
|
||||
movl %eax, _cpu_apic_versions /* into [ 0 ] */
|
||||
incl _mp_ncpus /* signal BSP */
|
||||
movl lapic+LA_VER, %eax /* our version reg contents */
|
||||
movl %eax, cpu_apic_versions /* into [ 0 ] */
|
||||
incl mp_ncpus /* signal BSP */
|
||||
|
||||
CHECKPOINT(0x39, 6)
|
||||
|
||||
/* Now, let's prepare for some REAL WORK :-) This doesn't return. */
|
||||
call _ap_init
|
||||
call ap_init
|
||||
|
||||
/*
|
||||
* This is the embedded trampoline or bootstrap that is
|
||||
@ -150,10 +150,10 @@ NON_GPROF_ENTRY(bootMP)
|
||||
mov %ax, %fs
|
||||
mov %ax, %gs
|
||||
mov %ax, %ss
|
||||
mov $(boot_stk-_bootMP), %esp
|
||||
mov $(boot_stk-bootMP), %esp
|
||||
|
||||
/* Now load the global descriptor table */
|
||||
lgdt MP_GDTptr-_bootMP
|
||||
lgdt MP_GDTptr-bootMP
|
||||
|
||||
/* Enable protected mode */
|
||||
movl %cr0, %eax
|
||||
@ -165,7 +165,7 @@ NON_GPROF_ENTRY(bootMP)
|
||||
* reload CS register
|
||||
*/
|
||||
pushl $0x18
|
||||
pushl $(protmode-_bootMP)
|
||||
pushl $(protmode-bootMP)
|
||||
lretl
|
||||
|
||||
.code32
|
||||
@ -188,8 +188,8 @@ protmode:
|
||||
movw %bx, %gs
|
||||
movw %bx, %ss
|
||||
|
||||
.globl _bigJump
|
||||
_bigJump:
|
||||
.globl bigJump
|
||||
bigJump:
|
||||
/* this will be modified by mpInstallTramp() */
|
||||
ljmp $0x08, $0 /* far jmp to MPentry() */
|
||||
|
||||
@ -200,10 +200,10 @@ dead: hlt /* We should never get here */
|
||||
* MP boot strap Global Descriptor Table
|
||||
*/
|
||||
.p2align 4
|
||||
.globl _MP_GDT
|
||||
.globl _bootCodeSeg
|
||||
.globl _bootDataSeg
|
||||
_MP_GDT:
|
||||
.globl MP_GDT
|
||||
.globl bootCodeSeg
|
||||
.globl bootDataSeg
|
||||
MP_GDT:
|
||||
|
||||
nulldesc: /* offset = 0x0 */
|
||||
|
||||
@ -235,7 +235,7 @@ kerneldata: /* offset = 0x10 */
|
||||
bootcode: /* offset = 0x18 */
|
||||
|
||||
.word 0xffff /* segment limit 0..15 */
|
||||
_bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
.word 0x0000 /* segment base 0..15 */
|
||||
.byte 0x00 /* segment base 16...23; set for 0x000xx000 */
|
||||
.byte 0x9e /* flags; Type */
|
||||
@ -245,7 +245,7 @@ _bootCodeSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootdata: /* offset = 0x20 */
|
||||
|
||||
.word 0xffff
|
||||
_bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
.word 0x0000 /* segment base 0..15 */
|
||||
.byte 0x00 /* segment base 16...23; set for 0x000xx000 */
|
||||
.byte 0x92
|
||||
@ -255,18 +255,18 @@ _bootDataSeg: /* this will be modified by mpInstallTramp() */
|
||||
/*
|
||||
* GDT pointer for the lgdt call
|
||||
*/
|
||||
.globl _mp_gdtbase
|
||||
.globl mp_gdtbase
|
||||
|
||||
MP_GDTptr:
|
||||
_mp_gdtlimit:
|
||||
mp_gdtlimit:
|
||||
.word 0x0028
|
||||
_mp_gdtbase: /* this will be modified by mpInstallTramp() */
|
||||
mp_gdtbase: /* this will be modified by mpInstallTramp() */
|
||||
.long 0
|
||||
|
||||
.space 0x100 /* space for boot_stk - 1st temporary stack */
|
||||
boot_stk:
|
||||
|
||||
BOOTMP2:
|
||||
.globl _bootMP_size
|
||||
_bootMP_size:
|
||||
.globl bootMP_size
|
||||
bootMP_size:
|
||||
.long BOOTMP2 - BOOTMP1
|
||||
|
@ -45,21 +45,21 @@
|
||||
#define IDXSHIFT 10
|
||||
|
||||
.data
|
||||
.globl _bcopy_vector
|
||||
_bcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl _bzero
|
||||
_bzero:
|
||||
.long _generic_bzero
|
||||
.globl _copyin_vector
|
||||
_copyin_vector:
|
||||
.long _generic_copyin
|
||||
.globl _copyout_vector
|
||||
_copyout_vector:
|
||||
.long _generic_copyout
|
||||
.globl _ovbcopy_vector
|
||||
_ovbcopy_vector:
|
||||
.long _generic_bcopy
|
||||
.globl bcopy_vector
|
||||
bcopy_vector:
|
||||
.long generic_bcopy
|
||||
.globl bzero
|
||||
bzero:
|
||||
.long generic_bzero
|
||||
.globl copyin_vector
|
||||
copyin_vector:
|
||||
.long generic_copyin
|
||||
.globl copyout_vector
|
||||
copyout_vector:
|
||||
.long generic_copyout
|
||||
.globl ovbcopy_vector
|
||||
ovbcopy_vector:
|
||||
.long generic_bcopy
|
||||
#if defined(I586_CPU) && defined(DEV_NPX)
|
||||
kernel_fpu_lock:
|
||||
.byte 0xfe
|
||||
@ -428,11 +428,11 @@ ENTRY(bcopyb)
|
||||
|
||||
ENTRY(bcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_bcopy_vector
|
||||
jmp *bcopy_vector
|
||||
|
||||
ENTRY(ovbcopy)
|
||||
MEXITCOUNT
|
||||
jmp *_ovbcopy_vector
|
||||
jmp *ovbcopy_vector
|
||||
|
||||
/*
|
||||
* generic_bcopy(src, dst, cnt)
|
||||
@ -667,7 +667,7 @@ ENTRY(memcpy)
|
||||
*/
|
||||
ENTRY(copyout)
|
||||
MEXITCOUNT
|
||||
jmp *_copyout_vector
|
||||
jmp *copyout_vector
|
||||
|
||||
ENTRY(generic_copyout)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -725,12 +725,12 @@ ENTRY(generic_copyout)
|
||||
|
||||
1:
|
||||
/* check PTE for each page */
|
||||
leal _PTmap(%edx),%eax
|
||||
leal PTmap(%edx),%eax
|
||||
shrl $IDXSHIFT,%eax
|
||||
andb $0xfc,%al
|
||||
testb $PG_V,_PTmap(%eax) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%eax) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%al
|
||||
movb PTmap(%edx),%al
|
||||
andb $PG_V|PG_RW|PG_U,%al /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%al
|
||||
je 2f
|
||||
@ -741,7 +741,7 @@ ENTRY(generic_copyout)
|
||||
pushl %ecx
|
||||
shll $IDXSHIFT,%edx
|
||||
pushl %edx
|
||||
call _trapwrite /* trapwrite(addr) */
|
||||
call trapwrite /* trapwrite(addr) */
|
||||
popl %edx
|
||||
popl %ecx
|
||||
popl %edx
|
||||
@ -839,7 +839,7 @@ ENTRY(i586_copyout)
|
||||
jb slow_copyout
|
||||
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $4,%esp
|
||||
jmp done_copyout
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -849,7 +849,7 @@ ENTRY(i586_copyout)
|
||||
*/
|
||||
ENTRY(copyin)
|
||||
MEXITCOUNT
|
||||
jmp *_copyin_vector
|
||||
jmp *copyin_vector
|
||||
|
||||
ENTRY(generic_copyin)
|
||||
movl PCPU(CURPCB),%eax
|
||||
@ -933,7 +933,7 @@ ENTRY(i586_copyin)
|
||||
|
||||
pushl %ebx /* XXX prepare for fastmove_fault */
|
||||
pushl %ecx
|
||||
call _fastmove
|
||||
call fastmove
|
||||
addl $8,%esp
|
||||
jmp done_copyin
|
||||
#endif /* I586_CPU && defined(DEV_NPX) */
|
||||
@ -1209,12 +1209,12 @@ ENTRY(suword)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1222,7 +1222,7 @@ ENTRY(suword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1258,9 +1258,9 @@ ENTRY(susword)
|
||||
leal _PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1268,7 +1268,7 @@ ENTRY(susword)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1301,12 +1301,12 @@ ENTRY(subyte)
|
||||
shrl $IDXSHIFT,%edx
|
||||
andb $0xfc,%dl
|
||||
|
||||
leal _PTmap(%edx),%ecx
|
||||
leal PTmap(%edx),%ecx
|
||||
shrl $IDXSHIFT,%ecx
|
||||
andb $0xfc,%cl
|
||||
testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
|
||||
testb $PG_V,PTmap(%ecx) /* PTE page must be valid */
|
||||
je 4f
|
||||
movb _PTmap(%edx),%dl
|
||||
movb PTmap(%edx),%dl
|
||||
andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
|
||||
cmpb $PG_V|PG_RW|PG_U,%dl
|
||||
je 1f
|
||||
@ -1314,7 +1314,7 @@ ENTRY(subyte)
|
||||
4:
|
||||
/* simulate a trap */
|
||||
pushl %eax
|
||||
call _trapwrite
|
||||
call trapwrite
|
||||
popl %edx /* remove junk parameter from stack */
|
||||
testl %eax,%eax
|
||||
jnz fusufault
|
||||
@ -1564,7 +1564,7 @@ ENTRY(rcr3)
|
||||
/* void load_cr3(caddr_t cr3) */
|
||||
ENTRY(load_cr3)
|
||||
#ifdef SWTCH_OPTIM_STATS
|
||||
incl _tlb_flush_count
|
||||
incl tlb_flush_count
|
||||
#endif
|
||||
movl 4(%esp),%eax
|
||||
movl %eax,%cr3
|
||||
|
@ -56,12 +56,12 @@
|
||||
|
||||
.data
|
||||
|
||||
.globl _panic
|
||||
.globl panic
|
||||
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
.globl _swtch_optim_stats, _tlb_flush_count
|
||||
_swtch_optim_stats: .long 0 /* number of _swtch_optims */
|
||||
_tlb_flush_count: .long 0
|
||||
.globl swtch_optim_stats, tlb_flush_count
|
||||
swtch_optim_stats: .long 0 /* number of _swtch_optims */
|
||||
tlb_flush_count: .long 0
|
||||
#endif
|
||||
|
||||
.text
|
||||
@ -129,7 +129,7 @@ ENTRY(cpu_switch)
|
||||
jne 1f
|
||||
addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */
|
||||
pushl %edx
|
||||
call _npxsave /* do it in a big C function */
|
||||
call npxsave /* do it in a big C function */
|
||||
popl %eax
|
||||
1:
|
||||
#endif /* DEV_NPX */
|
||||
@ -139,7 +139,7 @@ sw1:
|
||||
|
||||
#ifdef SMP
|
||||
/* Stop scheduling if smp_active goes zero and we are not BSP */
|
||||
cmpl $0,_smp_active
|
||||
cmpl $0,smp_active
|
||||
jne 1f
|
||||
cmpl $0,PCPU(CPUID)
|
||||
je 1f
|
||||
@ -154,7 +154,7 @@ sw1:
|
||||
* if it cannot find another process to run.
|
||||
*/
|
||||
sw1a:
|
||||
call _chooseproc /* trash ecx, edx, ret eax*/
|
||||
call chooseproc /* trash ecx, edx, ret eax*/
|
||||
|
||||
#ifdef INVARIANTS
|
||||
testl %eax,%eax /* no process? */
|
||||
@ -171,15 +171,15 @@ sw1b:
|
||||
movl P_ADDR(%ecx),%edx
|
||||
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
incl _swtch_optim_stats
|
||||
incl swtch_optim_stats
|
||||
#endif
|
||||
/* switch address space */
|
||||
movl %cr3,%ebx
|
||||
cmpl PCB_CR3(%edx),%ebx
|
||||
je 4f
|
||||
#if defined(SWTCH_OPTIM_STATS)
|
||||
decl _swtch_optim_stats
|
||||
incl _tlb_flush_count
|
||||
decl swtch_optim_stats
|
||||
incl tlb_flush_count
|
||||
#endif
|
||||
movl PCB_CR3(%edx),%ebx
|
||||
movl %ebx,%cr3
|
||||
@ -188,7 +188,7 @@ sw1b:
|
||||
movl PCPU(CPUID), %esi
|
||||
cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
|
||||
je 1f
|
||||
btsl %esi, _private_tss /* mark use of private tss */
|
||||
btsl %esi, private_tss /* mark use of private tss */
|
||||
movl PCB_EXT(%edx), %edi /* new tss descriptor */
|
||||
jmp 2f
|
||||
1:
|
||||
@ -198,7 +198,7 @@ sw1b:
|
||||
addl $(UPAGES * PAGE_SIZE - 16), %ebx
|
||||
movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0
|
||||
|
||||
btrl %esi, _private_tss
|
||||
btrl %esi, private_tss
|
||||
jae 3f
|
||||
PCPU_ADDR(COMMON_TSSD, %edi)
|
||||
2:
|
||||
@ -227,9 +227,9 @@ sw1b:
|
||||
#ifdef SMP
|
||||
#ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */
|
||||
#ifdef CHEAP_TPR
|
||||
movl $0, _lapic+LA_TPR
|
||||
movl $0, lapic+LA_TPR
|
||||
#else
|
||||
andl $~APIC_TPR_PRIO, _lapic+LA_TPR
|
||||
andl $~APIC_TPR_PRIO, lapic+LA_TPR
|
||||
#endif /** CHEAP_TPR */
|
||||
#endif /** GRAB_LOPRIO */
|
||||
#endif /* SMP */
|
||||
@ -242,14 +242,14 @@ sw1b:
|
||||
|
||||
cmpl $0, PCB_USERLDT(%edx)
|
||||
jnz 1f
|
||||
movl __default_ldt,%eax
|
||||
movl _default_ldt,%eax
|
||||
cmpl PCPU(CURRENTLDT),%eax
|
||||
je 2f
|
||||
lldt __default_ldt
|
||||
lldt _default_ldt
|
||||
movl %eax,PCPU(CURRENTLDT)
|
||||
jmp 2f
|
||||
1: pushl %edx
|
||||
call _set_user_ldt
|
||||
call set_user_ldt
|
||||
popl %edx
|
||||
2:
|
||||
|
||||
@ -282,13 +282,13 @@ CROSSJUMPTARGET(sw1a)
|
||||
#ifdef INVARIANTS
|
||||
badsw2:
|
||||
pushl $sw0_2
|
||||
call _panic
|
||||
call panic
|
||||
|
||||
sw0_2: .asciz "cpu_switch: not SRUN"
|
||||
|
||||
badsw3:
|
||||
pushl $sw0_3
|
||||
call _panic
|
||||
call panic
|
||||
|
||||
sw0_3: .asciz "cpu_switch: chooseproc returned NULL"
|
||||
#endif
|
||||
@ -337,7 +337,7 @@ ENTRY(savectx)
|
||||
leal PCB_SAVEFPU(%eax),%eax
|
||||
pushl %eax
|
||||
pushl %eax
|
||||
call _npxsave
|
||||
call npxsave
|
||||
addl $4,%esp
|
||||
popl %eax
|
||||
popl %ecx
|
||||
@ -346,7 +346,7 @@ ENTRY(savectx)
|
||||
leal PCB_SAVEFPU(%ecx),%ecx
|
||||
pushl %ecx
|
||||
pushl %eax
|
||||
call _bcopy
|
||||
call bcopy
|
||||
addl $12,%esp
|
||||
#endif /* DEV_NPX */
|
||||
|
||||
|
@ -44,10 +44,10 @@
|
||||
.data
|
||||
ALIGN_DATA
|
||||
|
||||
.globl _in_vm86call, _vm86pcb
|
||||
.globl in_vm86call, vm86pcb
|
||||
|
||||
_in_vm86call: .long 0
|
||||
_vm86pcb: .long 0
|
||||
in_vm86call: .long 0
|
||||
vm86pcb: .long 0
|
||||
|
||||
.text
|
||||
|
||||
@ -55,7 +55,7 @@ _vm86pcb: .long 0
|
||||
* vm86_bioscall(struct trapframe_vm86 *vm86)
|
||||
*/
|
||||
ENTRY(vm86_bioscall)
|
||||
movl _vm86pcb,%edx /* scratch data area */
|
||||
movl vm86pcb,%edx /* scratch data area */
|
||||
movl 4(%esp),%eax
|
||||
movl %eax,SCR_ARGFRAME(%edx) /* save argument pointer */
|
||||
pushl %ebx
|
||||
@ -74,7 +74,7 @@ ENTRY(vm86_bioscall)
|
||||
movl P_ADDR(%ecx),%ecx
|
||||
addl $PCB_SAVEFPU,%ecx
|
||||
pushl %ecx
|
||||
call _npxsave
|
||||
call npxsave
|
||||
popl %ecx
|
||||
popl %edx /* recover our pcb */
|
||||
#endif
|
||||
@ -109,7 +109,7 @@ ENTRY(vm86_bioscall)
|
||||
|
||||
movl %cr3,%eax
|
||||
pushl %eax /* save address space */
|
||||
movl _IdlePTD,%ecx
|
||||
movl IdlePTD,%ecx
|
||||
movl %ecx,%ebx
|
||||
addl $KERNBASE,%ebx /* va of Idle PTD */
|
||||
movl 0(%ebx),%eax
|
||||
@ -124,22 +124,22 @@ ENTRY(vm86_bioscall)
|
||||
movl %ecx,%cr3 /* new page tables */
|
||||
movl SCR_VMFRAME(%edx),%esp /* switch to new stack */
|
||||
|
||||
call _vm86_prepcall /* finish setup */
|
||||
call vm86_prepcall /* finish setup */
|
||||
|
||||
movl $1,_in_vm86call /* set flag for trap() */
|
||||
movl $1,in_vm86call /* set flag for trap() */
|
||||
|
||||
/*
|
||||
* Return via _doreti
|
||||
* Return via doreti
|
||||
*/
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
|
||||
/*
|
||||
* vm86_biosret(struct trapframe_vm86 *vm86)
|
||||
*/
|
||||
ENTRY(vm86_biosret)
|
||||
movl _vm86pcb,%edx /* data area */
|
||||
movl vm86pcb,%edx /* data area */
|
||||
|
||||
movl 4(%esp),%esi /* source */
|
||||
movl SCR_ARGFRAME(%edx),%edi /* destination */
|
||||
@ -155,7 +155,7 @@ ENTRY(vm86_biosret)
|
||||
popl %eax
|
||||
movl %eax,%cr3 /* install old page table */
|
||||
|
||||
movl $0,_in_vm86call /* reset trapflag */
|
||||
movl $0,in_vm86call /* reset trapflag */
|
||||
|
||||
movl PCPU(TSS_GDT),%ebx /* entry in GDT */
|
||||
movl SCR_TSS0(%edx),%eax
|
||||
|
@ -470,8 +470,8 @@ typedef struct IOAPIC ioapic_t;
|
||||
/*
|
||||
* Protects the IO APIC and apic_imen as a critical region.
|
||||
*/
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx)
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(imen_mtx)
|
||||
|
||||
#else /* SMP */
|
||||
|
||||
|
@ -470,8 +470,8 @@ typedef struct IOAPIC ioapic_t;
|
||||
/*
|
||||
* Protects the IO APIC and apic_imen as a critical region.
|
||||
*/
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx)
|
||||
#define IMASK_LOCK MTX_LOCK_SPIN(imen_mtx, 0)
|
||||
#define IMASK_UNLOCK MTX_UNLOCK_SPIN(imen_mtx)
|
||||
|
||||
#else /* SMP */
|
||||
|
||||
|
@ -37,7 +37,6 @@
|
||||
#define _MACHINE_ASMACROS_H_
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
#include <machine/asnames.h>
|
||||
|
||||
/* XXX too much duplication in various asm*.h's. */
|
||||
|
||||
|
@ -1,324 +0,0 @@
|
||||
/*-
|
||||
* Copyright (c) 1997 John D. Polstra
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _MACHINE_ASNAMES_H_
|
||||
#define _MACHINE_ASNAMES_H_
|
||||
|
||||
/*
|
||||
* This file is used by the kernel assembly language sources to provide
|
||||
* the proper mapping between the global names used in assembly language
|
||||
* code and the corresponding C symbols. By convention, all C symbols
|
||||
* that are referenced from assembly language are prefixed with `_'.
|
||||
* That happens to be the same prefix that the a.out compiler attaches
|
||||
* to each C symbol.
|
||||
*
|
||||
* When using the ELF compiler, C symbols are identical to the corresponding
|
||||
* assembly language symbols. Thus the extra underscores cause problems.
|
||||
* The defines in this file map the underscore names back to the proper
|
||||
* unadorned names.
|
||||
*
|
||||
* Every global symbol that is referenced from both C source and assembly
|
||||
* language source must have an entry in this file, or the kernel will
|
||||
* not build properly using the ELF compiler.
|
||||
*
|
||||
* This file is included by <machine/asmacros.h>, and it is OK to rely
|
||||
* on that.
|
||||
*/
|
||||
|
||||
#ifdef __ELF__
|
||||
|
||||
#define _APTD APTD
|
||||
#define _APTDpde APTDpde
|
||||
#define _APTmap APTmap
|
||||
#define _CONST_QNaN CONST_QNaN
|
||||
#define _IdlePTD IdlePTD
|
||||
#define _KPTphys KPTphys
|
||||
#define _MP_GDT MP_GDT
|
||||
#define _PTD PTD
|
||||
#define _PTDpde PTDpde
|
||||
#define _PTmap PTmap
|
||||
#define _SMP_prvspace SMP_prvspace
|
||||
#define _SMPpt SMPpt
|
||||
#define _Xalign Xalign
|
||||
#define _Xbnd Xbnd
|
||||
#define _Xbpt Xbpt
|
||||
#define _Xcpuast Xcpuast
|
||||
#define _Xcpucheckstate Xcpucheckstate
|
||||
#define _Xcpustop Xcpustop
|
||||
#define _Xdbg Xdbg
|
||||
#define _Xdiv Xdiv
|
||||
#define _Xdna Xdna
|
||||
#define _Xfastintr0 Xfastintr0
|
||||
#define _Xfastintr1 Xfastintr1
|
||||
#define _Xfastintr10 Xfastintr10
|
||||
#define _Xfastintr11 Xfastintr11
|
||||
#define _Xfastintr12 Xfastintr12
|
||||
#define _Xfastintr13 Xfastintr13
|
||||
#define _Xfastintr14 Xfastintr14
|
||||
#define _Xfastintr15 Xfastintr15
|
||||
#define _Xfastintr16 Xfastintr16
|
||||
#define _Xfastintr17 Xfastintr17
|
||||
#define _Xfastintr18 Xfastintr18
|
||||
#define _Xfastintr19 Xfastintr19
|
||||
#define _Xfastintr2 Xfastintr2
|
||||
#define _Xfastintr20 Xfastintr20
|
||||
#define _Xfastintr21 Xfastintr21
|
||||
#define _Xfastintr22 Xfastintr22
|
||||
#define _Xfastintr23 Xfastintr23
|
||||
#define _Xfastintr24 Xfastintr24
|
||||
#define _Xfastintr25 Xfastintr25
|
||||
#define _Xfastintr26 Xfastintr26
|
||||
#define _Xfastintr27 Xfastintr27
|
||||
#define _Xfastintr28 Xfastintr28
|
||||
#define _Xfastintr29 Xfastintr29
|
||||
#define _Xfastintr3 Xfastintr3
|
||||
#define _Xfastintr30 Xfastintr30
|
||||
#define _Xfastintr31 Xfastintr31
|
||||
#define _Xfastintr4 Xfastintr4
|
||||
#define _Xfastintr5 Xfastintr5
|
||||
#define _Xfastintr6 Xfastintr6
|
||||
#define _Xfastintr7 Xfastintr7
|
||||
#define _Xfastintr8 Xfastintr8
|
||||
#define _Xfastintr9 Xfastintr9
|
||||
#define _Xfpu Xfpu
|
||||
#define _Xfpusegm Xfpusegm
|
||||
#define _Xill Xill
|
||||
#define _Xint0x80_syscall Xint0x80_syscall
|
||||
#define _Xintr0 Xintr0
|
||||
#define _Xintr1 Xintr1
|
||||
#define _Xintr10 Xintr10
|
||||
#define _Xintr11 Xintr11
|
||||
#define _Xintr12 Xintr12
|
||||
#define _Xintr13 Xintr13
|
||||
#define _Xintr14 Xintr14
|
||||
#define _Xintr15 Xintr15
|
||||
#define _Xintr16 Xintr16
|
||||
#define _Xintr17 Xintr17
|
||||
#define _Xintr18 Xintr18
|
||||
#define _Xintr19 Xintr19
|
||||
#define _Xintr2 Xintr2
|
||||
#define _Xintr20 Xintr20
|
||||
#define _Xintr21 Xintr21
|
||||
#define _Xintr22 Xintr22
|
||||
#define _Xintr23 Xintr23
|
||||
#define _Xintr24 Xintr24
|
||||
#define _Xintr25 Xintr25
|
||||
#define _Xintr26 Xintr26
|
||||
#define _Xintr27 Xintr27
|
||||
#define _Xintr28 Xintr28
|
||||
#define _Xintr29 Xintr29
|
||||
#define _Xintr3 Xintr3
|
||||
#define _Xintr30 Xintr30
|
||||
#define _Xintr31 Xintr31
|
||||
#define _Xintr4 Xintr4
|
||||
#define _Xintr5 Xintr5
|
||||
#define _Xintr6 Xintr6
|
||||
#define _Xintr7 Xintr7
|
||||
#define _Xintr8 Xintr8
|
||||
#define _Xintr9 Xintr9
|
||||
#define _Xtintr0 Xtintr0
|
||||
#define _Xinvltlb Xinvltlb
|
||||
#define _Xrendezvous Xrendezvous
|
||||
#define _Xmchk Xmchk
|
||||
#define _Xmissing Xmissing
|
||||
#define _Xnmi Xnmi
|
||||
#define _Xofl Xofl
|
||||
#define _Xpage Xpage
|
||||
#define _Xprot Xprot
|
||||
#define _Xrsvd Xrsvd
|
||||
#define _Xspuriousint Xspuriousint
|
||||
#define _Xstk Xstk
|
||||
#define _Xlcall_syscall Xlcall_syscall
|
||||
#define _Xtss Xtss
|
||||
#define __default_ldt _default_ldt
|
||||
#define __ucodesel _ucodesel
|
||||
#define __udatasel _udatasel
|
||||
#define _alltraps alltraps
|
||||
#define _ap_init ap_init
|
||||
#define _apic_imen apic_imen
|
||||
#define _apic_isrbit_location apic_isrbit_location
|
||||
#define _apic_pin_trigger apic_pin_trigger
|
||||
#define _arith_invalid arith_invalid
|
||||
#define _arith_overflow arith_overflow
|
||||
#define _arith_underflow arith_underflow
|
||||
#define _ast ast
|
||||
#define _bcopy bcopy
|
||||
#define _bcopy_vector bcopy_vector
|
||||
#define _bigJump bigJump
|
||||
#define _bintr bintr
|
||||
#define _bioscall_vector bioscall_vector
|
||||
#define _bootCodeSeg bootCodeSeg
|
||||
#define _bootDataSeg bootDataSeg
|
||||
#define _bootMP bootMP
|
||||
#define _bootMP_size bootMP_size
|
||||
#define _bootSTK bootSTK
|
||||
#define _boot_get_mplock boot_get_mplock
|
||||
#define _bootdev bootdev
|
||||
#define _boothowto boothowto
|
||||
#define _bootinfo bootinfo
|
||||
#define _btrap btrap
|
||||
#define _bzero bzero
|
||||
#define _checkstate_cpus checkstate_cpus
|
||||
#define _checkstate_cpustate checkstate_cpustate
|
||||
#define _checkstate_curproc checkstate_curproc
|
||||
#define _checkstate_need_ast checkstate_need_ast
|
||||
#define _checkstate_pc checkstate_pc
|
||||
#define _checkstate_pending_ast checkstate_pending_ast
|
||||
#define _checkstate_probed_cpus checkstate_probed_cpus
|
||||
#define _chooseproc chooseproc
|
||||
#define _cnt cnt
|
||||
#define _copyin_vector copyin_vector
|
||||
#define _copyout_vector copyout_vector
|
||||
#define _cpu cpu
|
||||
#define _cpu0prvpage cpu0prvpage
|
||||
#define _cpu_apic_versions cpu_apic_versions
|
||||
#define _cpu_class cpu_class
|
||||
#define _cpu_feature cpu_feature
|
||||
#define _cpu_high cpu_high
|
||||
#define _cpu_id cpu_id
|
||||
#define _cpu_num_to_apic_id cpu_num_to_apic_id
|
||||
#define _cpu_switch cpu_switch
|
||||
#define _cpu_vendor cpu_vendor
|
||||
#define _default_halt default_halt
|
||||
#define _denormal_operand denormal_operand
|
||||
#define _div_small div_small
|
||||
#define _divide_by_zero divide_by_zero
|
||||
#define _divide_kernel divide_kernel
|
||||
#define _do_page_zero_idle do_page_zero_idle
|
||||
#define _doreti doreti
|
||||
#define _edata edata
|
||||
#define _eintrcnt eintrcnt
|
||||
#define _eintrnames eintrnames
|
||||
#define _end end
|
||||
#define _etext etext
|
||||
#define _exception exception
|
||||
#define _fastmove fastmove
|
||||
#define _fork_exit fork_exit
|
||||
#define _gdt gdt
|
||||
#define _generic_bcopy generic_bcopy
|
||||
#define _generic_bzero generic_bzero
|
||||
#define _generic_copyin generic_copyin
|
||||
#define _generic_copyout generic_copyout
|
||||
#define _get_align_lock get_align_lock
|
||||
#define _get_altsyscall_lock get_altsyscall_lock
|
||||
#define _get_fpu_lock get_fpu_lock
|
||||
#define _get_isrlock get_isrlock
|
||||
#define _get_mplock get_mplock
|
||||
#define _get_syscall_lock get_syscall_lock
|
||||
#define _Giant Giant
|
||||
#define _idle idle
|
||||
#define _imen imen
|
||||
#define _imen_mtx imen_mtx
|
||||
#define _in_vm86call in_vm86call
|
||||
#define _init386 init386
|
||||
#define _init_secondary init_secondary
|
||||
#define _intr_countp intr_countp
|
||||
#define _intr_handler intr_handler
|
||||
#define _intr_mask intr_mask
|
||||
#define _intr_unit intr_unit
|
||||
#define _intrcnt intrcnt
|
||||
#define _intrnames intrnames
|
||||
#define _invltlb_ok invltlb_ok
|
||||
#define _ioapic ioapic
|
||||
#define _isr_lock isr_lock
|
||||
#define _kernelname kernelname
|
||||
#define _lapic lapic
|
||||
#define _linux_sigcode linux_sigcode
|
||||
#define _linux_szsigcode linux_szsigcode
|
||||
#define _mi_startup mi_startup
|
||||
#define _microuptime microuptime
|
||||
#define _mp_gdtbase mp_gdtbase
|
||||
#define _mp_lock mp_lock
|
||||
#define _mp_ncpus mp_ncpus
|
||||
#define _mul64 mul64
|
||||
#define _nfs_diskless nfs_diskless
|
||||
#define _nfs_diskless_valid nfs_diskless_valid
|
||||
#define _normalize normalize
|
||||
#define _normalize_nuo normalize_nuo
|
||||
#define _npx_intr npx_intr
|
||||
#define _npxsave npxsave
|
||||
#define _szosigcode szosigcode
|
||||
#define _ovbcopy_vector ovbcopy_vector
|
||||
#define _panic panic
|
||||
#define _pc98_system_parameter pc98_system_parameter
|
||||
#define _poly_div16 poly_div16
|
||||
#define _poly_div2 poly_div2
|
||||
#define _poly_div4 poly_div4
|
||||
#define _polynomial polynomial
|
||||
#define _private_tss private_tss
|
||||
#define _proc0 proc0
|
||||
#define _proc0paddr proc0paddr
|
||||
#define _procrunnable procrunnable
|
||||
#define _real_2op_NaN real_2op_NaN
|
||||
#define _reg_div reg_div
|
||||
#define _reg_u_add reg_u_add
|
||||
#define _reg_u_div reg_u_div
|
||||
#define _reg_u_mul reg_u_mul
|
||||
#define _reg_u_sub reg_u_sub
|
||||
#define _rel_mplock rel_mplock
|
||||
#define _round_reg round_reg
|
||||
#define _sched_ithd sched_ithd
|
||||
#define _sched_lock sched_lock
|
||||
#define _set_precision_flag_down set_precision_flag_down
|
||||
#define _set_precision_flag_up set_precision_flag_up
|
||||
#define _set_user_ldt set_user_ldt
|
||||
#define _shrx shrx
|
||||
#define _shrxs shrxs
|
||||
#define _sigcode sigcode
|
||||
#define _smp_active smp_active
|
||||
#define _smp_rendezvous_action smp_rendezvous_action
|
||||
#define _softclock softclock
|
||||
#define _spending spending
|
||||
#define _started_cpus started_cpus
|
||||
#define _stopped_cpus stopped_cpus
|
||||
#define _svr4_sigcode svr4_sigcode
|
||||
#define _svr4_sys_context svr4_sys_context
|
||||
#define _svr4_szsigcode svr4_szsigcode
|
||||
#define _swi_dispatcher swi_dispatcher
|
||||
#define _swi_generic swi_generic
|
||||
#define _swi_net swi_net
|
||||
#define _swi_null swi_null
|
||||
#define _swi_vm swi_vm
|
||||
#define _syscall syscall
|
||||
#define _szsigcode szsigcode
|
||||
#define _ticks ticks
|
||||
#define _time time
|
||||
#define _trap trap
|
||||
#define _trapwrite trapwrite
|
||||
#define _vec vec
|
||||
#define _vec8254 vec8254
|
||||
#define _vm86_prepcall vm86_prepcall
|
||||
#define _vm86pa vm86pa
|
||||
#define _vm86paddr vm86paddr
|
||||
#define _vm86pcb vm86pcb
|
||||
#define _vm_page_zero_idle vm_page_zero_idle
|
||||
#define _wm_sqrt wm_sqrt
|
||||
|
||||
#endif /* __ELF__ */
|
||||
|
||||
#endif /* !_MACHINE_ASNAMES_H_ */
|
@ -39,8 +39,8 @@
|
||||
* imen_dump()
|
||||
*/
|
||||
.p2align 2 /* MUST be 32bit aligned */
|
||||
.globl _apic_imen
|
||||
_apic_imen:
|
||||
.globl apic_imen
|
||||
apic_imen:
|
||||
.long HWI_MASK
|
||||
|
||||
.text
|
||||
@ -59,7 +59,7 @@ _apic_imen:
|
||||
andl %eax, %eax ; \
|
||||
jz 1f ; \
|
||||
pushl $bad_mask ; \
|
||||
call _panic ; \
|
||||
call panic ; \
|
||||
1:
|
||||
|
||||
bad_mask: .asciz "bad mask"
|
||||
@ -80,7 +80,7 @@ ENTRY(INTREN)
|
||||
|
||||
movl 8(%esp), %eax /* mask into %eax */
|
||||
bsfl %eax, %ecx /* get pin index */
|
||||
btrl %ecx, _apic_imen /* update _apic_imen */
|
||||
btrl %ecx, apic_imen /* update apic_imen */
|
||||
|
||||
QUALIFY_MASK
|
||||
|
||||
@ -112,7 +112,7 @@ ENTRY(INTRDIS)
|
||||
|
||||
movl 8(%esp), %eax /* mask into %eax */
|
||||
bsfl %eax, %ecx /* get pin index */
|
||||
btsl %ecx, _apic_imen /* update _apic_imen */
|
||||
btsl %ecx, apic_imen /* update apic_imen */
|
||||
|
||||
QUALIFY_MASK
|
||||
|
||||
|
@ -50,27 +50,27 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
addl $4, %esp ; \
|
||||
movl $0, _lapic+LA_EOI ; \
|
||||
movl $0, lapic+LA_EOI ; \
|
||||
lock ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4, %eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4, %eax ; \
|
||||
lock ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
|
||||
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12
|
||||
|
||||
#define MASK_IRQ(irq_num) \
|
||||
IMASK_LOCK ; /* into critical reg */ \
|
||||
testl $IRQ_BIT(irq_num), _apic_imen ; \
|
||||
testl $IRQ_BIT(irq_num), apic_imen ; \
|
||||
jne 7f ; /* masked, don't mask */ \
|
||||
orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \
|
||||
orl $IRQ_BIT(irq_num), apic_imen ; /* set the mask bit */ \
|
||||
movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \
|
||||
movl REDIRIDX(irq_num), %eax ; /* get the index */ \
|
||||
movl %eax, (%ecx) ; /* write the index */ \
|
||||
@ -85,7 +85,7 @@ IDTVEC(vec_name) ; \
|
||||
* and the EOI cycle would cause redundant INTs to occur.
|
||||
*/
|
||||
#define MASK_LEVEL_IRQ(irq_num) \
|
||||
testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \
|
||||
testl $IRQ_BIT(irq_num), apic_pin_trigger ; \
|
||||
jz 9f ; /* edge, don't mask */ \
|
||||
MASK_IRQ(irq_num) ; \
|
||||
9:
|
||||
@ -93,18 +93,18 @@ IDTVEC(vec_name) ; \
|
||||
|
||||
#ifdef APIC_INTR_REORDER
|
||||
#define EOI_IRQ(irq_num) \
|
||||
movl _apic_isrbit_location + 8 * (irq_num), %eax ; \
|
||||
movl apic_isrbit_location + 8 * (irq_num), %eax ; \
|
||||
movl (%eax), %eax ; \
|
||||
testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
|
||||
testl apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \
|
||||
jz 9f ; /* not active */ \
|
||||
movl $0, _lapic+LA_EOI ; \
|
||||
movl $0, lapic+LA_EOI ; \
|
||||
9:
|
||||
|
||||
#else
|
||||
#define EOI_IRQ(irq_num) \
|
||||
testl $IRQ_BIT(irq_num), _lapic+LA_ISR1; \
|
||||
testl $IRQ_BIT(irq_num), lapic+LA_ISR1; \
|
||||
jz 9f ; /* not active */ \
|
||||
movl $0, _lapic+LA_EOI; \
|
||||
movl $0, lapic+LA_EOI; \
|
||||
9:
|
||||
#endif
|
||||
|
||||
@ -160,12 +160,12 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Handle "spurious INTerrupts".
|
||||
@ -176,8 +176,8 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xspuriousint
|
||||
_Xspuriousint:
|
||||
.globl Xspuriousint
|
||||
Xspuriousint:
|
||||
|
||||
/* No EOI cycle used here */
|
||||
|
||||
@ -189,8 +189,8 @@ _Xspuriousint:
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xinvltlb
|
||||
_Xinvltlb:
|
||||
.globl Xinvltlb
|
||||
Xinvltlb:
|
||||
pushl %eax
|
||||
|
||||
#ifdef COUNT_XINVLTLB_HITS
|
||||
@ -207,7 +207,7 @@ _Xinvltlb:
|
||||
movl %eax, %cr3
|
||||
|
||||
ss /* stack segment, avoid %ds load */
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
popl %eax
|
||||
iret
|
||||
@ -229,11 +229,11 @@ _Xinvltlb:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpucheckstate
|
||||
.globl _checkstate_cpustate
|
||||
.globl _checkstate_curproc
|
||||
.globl _checkstate_pc
|
||||
_Xcpucheckstate:
|
||||
.globl Xcpucheckstate
|
||||
.globl checkstate_cpustate
|
||||
.globl checkstate_curproc
|
||||
.globl checkstate_pc
|
||||
Xcpucheckstate:
|
||||
pushl %eax
|
||||
pushl %ebx
|
||||
pushl %ds /* save current data segment */
|
||||
@ -244,7 +244,7 @@ _Xcpucheckstate:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
movl $0, %ebx
|
||||
movl 20(%esp), %eax
|
||||
@ -256,15 +256,15 @@ _Xcpucheckstate:
|
||||
incl %ebx /* system or interrupt */
|
||||
1:
|
||||
movl PCPU(CPUID), %eax
|
||||
movl %ebx, _checkstate_cpustate(,%eax,4)
|
||||
movl %ebx, checkstate_cpustate(,%eax,4)
|
||||
movl PCPU(CURPROC), %ebx
|
||||
movl %ebx, _checkstate_curproc(,%eax,4)
|
||||
movl %ebx, checkstate_curproc(,%eax,4)
|
||||
|
||||
movl 16(%esp), %ebx
|
||||
movl %ebx, _checkstate_pc(,%eax,4)
|
||||
movl %ebx, checkstate_pc(,%eax,4)
|
||||
|
||||
lock /* checkstate_probed_cpus |= (1<<id) */
|
||||
btsl %eax, _checkstate_probed_cpus
|
||||
btsl %eax, checkstate_probed_cpus
|
||||
|
||||
popl %fs
|
||||
popl %ds /* restore previous data segment */
|
||||
@ -284,8 +284,8 @@ _Xcpucheckstate:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpuast
|
||||
_Xcpuast:
|
||||
.globl Xcpuast
|
||||
Xcpuast:
|
||||
PUSH_FRAME
|
||||
movl $KDSEL, %eax
|
||||
mov %ax, %ds /* use KERNEL data segment */
|
||||
@ -295,11 +295,11 @@ _Xcpuast:
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
lock /* checkstate_need_ast &= ~(1<<id) */
|
||||
btrl %eax, _checkstate_need_ast
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
btrl %eax, checkstate_need_ast
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
lock
|
||||
btsl %eax, _checkstate_pending_ast
|
||||
btsl %eax, checkstate_pending_ast
|
||||
jc 1f
|
||||
|
||||
FAKE_MCOUNT(13*4(%esp))
|
||||
@ -310,7 +310,7 @@ _Xcpuast:
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
lock
|
||||
btrl %eax, _checkstate_pending_ast
|
||||
btrl %eax, checkstate_pending_ast
|
||||
lock
|
||||
btrl %eax, CNAME(resched_cpus)
|
||||
jnc 2f
|
||||
@ -322,7 +322,7 @@ _Xcpuast:
|
||||
lock
|
||||
incl CNAME(cpuast_cnt)
|
||||
MEXITCOUNT
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
1:
|
||||
/* We are already in the process of delivering an ast for this CPU */
|
||||
POP_FRAME
|
||||
@ -338,8 +338,8 @@ _Xcpuast:
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xcpustop
|
||||
_Xcpustop:
|
||||
.globl Xcpustop
|
||||
Xcpustop:
|
||||
pushl %ebp
|
||||
movl %esp, %ebp
|
||||
pushl %eax
|
||||
@ -353,7 +353,7 @@ _Xcpustop:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
|
||||
movl PCPU(CPUID), %eax
|
||||
imull $PCB_SIZE, %eax
|
||||
@ -366,15 +366,15 @@ _Xcpustop:
|
||||
movl PCPU(CPUID), %eax
|
||||
|
||||
lock
|
||||
btsl %eax, _stopped_cpus /* stopped_cpus |= (1<<id) */
|
||||
btsl %eax, stopped_cpus /* stopped_cpus |= (1<<id) */
|
||||
1:
|
||||
btl %eax, _started_cpus /* while (!(started_cpus & (1<<id))) */
|
||||
btl %eax, started_cpus /* while (!(started_cpus & (1<<id))) */
|
||||
jnc 1b
|
||||
|
||||
lock
|
||||
btrl %eax, _started_cpus /* started_cpus &= ~(1<<id) */
|
||||
btrl %eax, started_cpus /* started_cpus &= ~(1<<id) */
|
||||
lock
|
||||
btrl %eax, _stopped_cpus /* stopped_cpus &= ~(1<<id) */
|
||||
btrl %eax, stopped_cpus /* stopped_cpus &= ~(1<<id) */
|
||||
|
||||
test %eax, %eax
|
||||
jnz 2f
|
||||
@ -472,8 +472,8 @@ MCOUNT_LABEL(eintr)
|
||||
*/
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
.globl _Xrendezvous
|
||||
_Xrendezvous:
|
||||
.globl Xrendezvous
|
||||
Xrendezvous:
|
||||
PUSH_FRAME
|
||||
movl $KDSEL, %eax
|
||||
mov %ax, %ds /* use KERNEL data segment */
|
||||
@ -481,9 +481,9 @@ _Xrendezvous:
|
||||
movl $KPSEL, %eax
|
||||
mov %ax, %fs
|
||||
|
||||
call _smp_rendezvous_action
|
||||
call smp_rendezvous_action
|
||||
|
||||
movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
movl $0, lapic+LA_EOI /* End Of Interrupt to APIC */
|
||||
POP_FRAME
|
||||
iret
|
||||
|
||||
@ -497,21 +497,21 @@ _xhits:
|
||||
#endif /* COUNT_XINVLTLB_HITS */
|
||||
|
||||
/* variables used by stop_cpus()/restart_cpus()/Xcpustop */
|
||||
.globl _stopped_cpus, _started_cpus
|
||||
_stopped_cpus:
|
||||
.globl stopped_cpus, started_cpus
|
||||
stopped_cpus:
|
||||
.long 0
|
||||
_started_cpus:
|
||||
started_cpus:
|
||||
.long 0
|
||||
|
||||
#ifdef BETTER_CLOCK
|
||||
.globl _checkstate_probed_cpus
|
||||
_checkstate_probed_cpus:
|
||||
.globl checkstate_probed_cpus
|
||||
checkstate_probed_cpus:
|
||||
.long 0
|
||||
#endif /* BETTER_CLOCK */
|
||||
.globl _checkstate_need_ast
|
||||
_checkstate_need_ast:
|
||||
.globl checkstate_need_ast
|
||||
checkstate_need_ast:
|
||||
.long 0
|
||||
_checkstate_pending_ast:
|
||||
checkstate_pending_ast:
|
||||
.long 0
|
||||
.globl CNAME(resched_cpus)
|
||||
.globl CNAME(want_resched_cnt)
|
||||
@ -526,8 +526,8 @@ CNAME(cpuast_cnt):
|
||||
CNAME(cpustop_restartfunc):
|
||||
.long 0
|
||||
|
||||
.globl _apic_pin_trigger
|
||||
_apic_pin_trigger:
|
||||
.globl apic_pin_trigger
|
||||
apic_pin_trigger:
|
||||
.long 0
|
||||
|
||||
.text
|
||||
|
@ -63,16 +63,16 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
|
||||
addl $4,%esp ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Slow, threaded interrupts.
|
||||
@ -99,9 +99,9 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
maybe_extra_ipending ; \
|
||||
movb _imen + IRQ_BYTE(irq_num),%al ; \
|
||||
movb imen + IRQ_BYTE(irq_num),%al ; \
|
||||
orb $IRQ_BIT(irq_num),%al ; \
|
||||
movb %al,_imen + IRQ_BYTE(irq_num) ; \
|
||||
movb %al,imen + IRQ_BYTE(irq_num) ; \
|
||||
outb %al,$icu+ICU_IMR_OFFSET ; \
|
||||
enable_icus ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
@ -110,13 +110,13 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
/* We could usually avoid the following jmp by inlining some of */ \
|
||||
/* _doreti, but it's probably better to use less cache. */ \
|
||||
jmp _doreti /* and catch up inside doreti */
|
||||
/* doreti, but it's probably better to use less cache. */ \
|
||||
jmp doreti /* and catch up inside doreti */
|
||||
|
||||
MCOUNT_LABEL(bintr)
|
||||
FAST_INTR(0,fastintr0, ENABLE_ICU1)
|
||||
|
@ -41,8 +41,8 @@
|
||||
ALIGN_DATA
|
||||
|
||||
/* interrupt mask enable (all h/w off) */
|
||||
.globl _imen
|
||||
_imen: .long HWI_MASK
|
||||
.globl imen
|
||||
imen: .long HWI_MASK
|
||||
|
||||
.text
|
||||
SUPERALIGN_TEXT
|
||||
|
@ -63,16 +63,16 @@ IDTVEC(vec_name) ; \
|
||||
FAKE_MCOUNT((12+ACTUALLY_PUSHED)*4(%esp)) ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
incl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
pushl _intr_unit + (irq_num) * 4 ; \
|
||||
call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
pushl intr_unit + (irq_num) * 4 ; \
|
||||
call *intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
|
||||
enable_icus ; /* (re)enable ASAP (helps edge trigger?) */ \
|
||||
addl $4,%esp ; \
|
||||
incl _cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl _intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl cnt+V_INTR ; /* book-keeping can wait */ \
|
||||
movl intr_countp + (irq_num) * 4,%eax ; \
|
||||
incl (%eax) ; \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
jmp _doreti
|
||||
jmp doreti
|
||||
|
||||
/*
|
||||
* Slow, threaded interrupts.
|
||||
@ -99,9 +99,9 @@ IDTVEC(vec_name) ; \
|
||||
mov $KPSEL,%ax ; \
|
||||
mov %ax,%fs ; \
|
||||
maybe_extra_ipending ; \
|
||||
movb _imen + IRQ_BYTE(irq_num),%al ; \
|
||||
movb imen + IRQ_BYTE(irq_num),%al ; \
|
||||
orb $IRQ_BIT(irq_num),%al ; \
|
||||
movb %al,_imen + IRQ_BYTE(irq_num) ; \
|
||||
movb %al,imen + IRQ_BYTE(irq_num) ; \
|
||||
outb %al,$icu+ICU_IMR_OFFSET ; \
|
||||
enable_icus ; \
|
||||
movl PCPU(CURPROC),%ebx ; \
|
||||
@ -110,13 +110,13 @@ __CONCAT(Xresume,irq_num): ; \
|
||||
FAKE_MCOUNT(13*4(%esp)) ; /* XXX late to avoid double count */ \
|
||||
pushl $irq_num; /* pass the IRQ */ \
|
||||
sti ; \
|
||||
call _sched_ithd ; \
|
||||
call sched_ithd ; \
|
||||
addl $4, %esp ; /* discard the parameter */ \
|
||||
decl P_INTR_NESTING_LEVEL(%ebx) ; \
|
||||
MEXITCOUNT ; \
|
||||
/* We could usually avoid the following jmp by inlining some of */ \
|
||||
/* _doreti, but it's probably better to use less cache. */ \
|
||||
jmp _doreti /* and catch up inside doreti */
|
||||
/* doreti, but it's probably better to use less cache. */ \
|
||||
jmp doreti /* and catch up inside doreti */
|
||||
|
||||
MCOUNT_LABEL(bintr)
|
||||
FAST_INTR(0,fastintr0, ENABLE_ICU1)
|
||||
|
@ -53,22 +53,22 @@
* Handle return from interrupts, traps and syscalls.
*/
SUPERALIGN_TEXT
.type _doreti,@function
_doreti:
.type doreti,@function
doreti:

FAKE_MCOUNT(_bintr) /* init "from" _bintr -> _doreti */
FAKE_MCOUNT(bintr) /* init "from" bintr -> doreti */
doreti_next:
/* Check for ASTs that can be handled now. */
testb $SEL_RPL_MASK,TF_CS(%esp) /* are we in user mode? */
jne doreti_ast /* yes, do it now. */
testl $PSL_VM,TF_EFLAGS(%esp) /* kernel mode */
je doreti_exit /* and not VM86 mode, defer */
cmpl $1,_in_vm86call /* are we in a VM86 call? */
cmpl $1,in_vm86call /* are we in a VM86 call? */
je doreti_exit /* no, defer */

doreti_ast:
pushl %esp /* pass a pointer to the trapframe */
call _ast
call ast
add $4, %esp

/*
@ -45,15 +45,15 @@
*/
#define NR_INTRNAMES (1 + ICU_LEN + 2 * ICU_LEN)

.globl _intrcnt, _eintrcnt
_intrcnt:
.globl intrcnt, eintrcnt
intrcnt:
.space NR_INTRNAMES * 4
_eintrcnt:
eintrcnt:

.globl _intrnames, _eintrnames
_intrnames:
.globl intrnames, eintrnames
intrnames:
.space NR_INTRNAMES * 16
_eintrnames:
eintrnames:
.text

/*
@ -16,7 +16,7 @@ NON_GPROF_ENTRY(linux_sigcode)
0: jmp 0b
ALIGN_TEXT
/* XXXXX */
_linux_rt_sigcode:
linux_rt_sigcode:
call *LINUX_RT_SIGF_HANDLER(%esp)
leal LINUX_RT_SIGF_UC(%esp),%ebx /* linux ucp */
movl LINUX_SC_GS(%ebx),%gs
@ -26,11 +26,11 @@ _linux_rt_sigcode:
0: jmp 0b
ALIGN_TEXT
/* XXXXX */
_linux_esigcode:
linux_esigcode:

.data
.globl _linux_szsigcode, _linux_sznonrtsigcode
_linux_szsigcode:
.long _linux_esigcode-_linux_sigcode
_linux_sznonrtsigcode:
.long _linux_rt_sigcode-_linux_sigcode
.globl linux_szsigcode, linux_sznonrtsigcode
linux_szsigcode:
.long linux_esigcode-linux_sigcode
linux_sznonrtsigcode:
.long linux_rt_sigcode-linux_sigcode
@ -199,7 +199,7 @@ elf_linux_fixup(register_t **stack_base, struct image_params *imgp)
}

extern int _ucodesel, _udatasel;
extern unsigned long _linux_sznonrtsigcode;
extern unsigned long linux_sznonrtsigcode;

static void
linux_rt_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
@ -329,7 +329,7 @@ linux_rt_sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
*/
regs->tf_esp = (int)fp;
regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode) +
_linux_sznonrtsigcode;
linux_sznonrtsigcode;
regs->tf_eflags &= ~PSL_VM;
regs->tf_cs = _ucodesel;
regs->tf_ds = _udatasel;
@ -17,7 +17,7 @@ NON_GPROF_ENTRY(svr4_sigcode)
movl SVR4_UC_GS(%eax),%gs
1: pushl %eax # pointer to ucontext
pushl $1 # set context
movl $_svr4_sys_context,%eax
movl $svr4_sys_context,%eax
int $0x80 # enter kernel with args on stack
0: jmp 0b

@ -25,9 +25,9 @@ NON_GPROF_ENTRY(svr4_sigcode)
svr4_esigcode:

.data
.globl _svr4_szsigcode
_svr4_szsigcode:
.long svr4_esigcode - _svr4_sigcode
.globl svr4_szsigcode
svr4_szsigcode:
.long svr4_esigcode - svr4_sigcode
|
||||
|
||||
.text
|
||||
|
||||
|