1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-04 09:09:56 +00:00

Revamp of the syscall path, exception and context handling. The

prime objectives are:
o  Implement a syscall path based on the epc instruction (see
   sys/ia64/ia64/syscall.s).
o  Revisit the places where we need to save and restore registers
   and define those contexts in terms of the register sets (see
   sys/ia64/include/_regset.h).

Secondary objectives:
o  Remove the requirement to use contigmalloc for kernel stacks.
o  Better handling of the high FP registers for SMP systems.
o  Switch to the new cpu_switch() and cpu_throw() semantics.
o  Add a good unwinder to reconstruct contexts for the rare
   cases we need to (see sys/contrib/ia64/libuwx)

Many files are affected by this change. Functionally it boils
down to:
o  The EPC syscall doesn't preserve registers it does not need
   to preserve and places the arguments differently on the stack.
   This affects libc and truss.
o  The address of the kernel page directory (kptdir) had to
   be unstaticized for use by the nested TLB fault handler.
   The name has been changed to ia64_kptdir to avoid conflicts.
   The renaming affects libkvm.
o  The trapframe only contains the special registers and the
   scratch registers. For syscalls using the EPC syscall path
   no scratch registers are saved. This affects all places where
   the trapframe is accessed. Most notably the unaligned access
   handler, the signal delivery code and the debugger.
o  Context switching only partly saves the special registers
   and the preserved registers. This affects cpu_switch() and
   triggered the move to the new semantics, which additionally
   affects cpu_throw().
o  The high FP registers are either in the PCB or on some
   CPU. Context switching for them is done lazily. This affects
   trap().
o  The mcontext has room for all registers, but not all of them
   have to be defined in all cases. This mostly affects signal
   delivery code now. The *context syscalls are as of yet still
   unimplemented.

Many details went into the removal of the requirement to use
contigmalloc for kernel stacks. The details are mostly CPU
specific and limited to exception_save() and exception_restore().
The few places where we create, destroy or switch stacks were
mostly simplified by not having to construct physical addresses
and additionally saving the virtual addresses for later use.

Besides more efficient context saving and restoring, which of
course yields a noticeable speedup, this also fixes the dreaded
SMP bootup problem as a side-effect. The details of which are
still not fully understood.

This change includes all the necessary backward compatibility
code to have it handle older userland binaries that use the
break instruction for syscalls. Support for break-based syscalls
has been pessimized in favor of a clean implementation. Due to
the overall better performance of the kernel, this will still
be noticed as an improvement if it's noticed at all.

Approved by: re@ (jhb)
This commit is contained in:
Marcel Moolenaar 2003-05-16 21:26:42 +00:00
parent 07e4e2cc98
commit f2c49dd248
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=115084
46 changed files with 3325 additions and 5841 deletions

View File

@ -44,12 +44,14 @@ ENTRY(brk, 1)
add r14=@ltoff(minbrk),gp ;;
ld8 r14=[r14] ;;
ld8 r14=[r14] ;;
cmp.ltu p6,p0=in0,r14 ;;
(p6) mov in0=r14
cmp.ltu p6,p0=r32,r14 ;;
(p6) mov r32=r14 ;;
st8 [sp]=r32
CALLSYS_ERROR(break)
ld8 r15=[sp]
add r14=@ltoff(curbrk),gp ;;
ld8 r14=[r14] ;;
st8 [r14]=in0
st8 [r14]=r15
mov ret0=0
br.ret.sptk.few rp
END(brk)

View File

@ -32,10 +32,16 @@ __FBSDID("$FreeBSD$");
#include "SYS.h"
SYSCALL(pipe)
.regstk 1,0,0,0
st4 [in0]=ret0,4 ;;
st4 [in0]=ret1
mov ret0=0
ENTRY(__sys_pipe, 1)
WEAK_ALIAS(pipe, __sys_pipe)
WEAK_ALIAS(_pipe, __sys_pipe)
st8 [sp]=r32
CALLSYS_ERROR(pipe)
ld8 r14=[sp]
;;
st4 [r14]=ret0,4
;;
st4 [r14]=ret1
mov ret0=0
br.ret.sptk.few rp
END(pipe)
END(__sys_pipe)

View File

@ -43,18 +43,21 @@ ENTRY(sbrk, 1)
add r14 = @ltoff(curbrk), gp
;;
ld8 r14 = [r14]
cmp.eq p6, p0 = in0, r0
cmp.eq p6, p0 = r32, r0
;;
ld8 ret0 = [r14]
(p6) br.ret.sptk.few rp
;;
add in0 = ret0, in0
add r32 = ret0, r32
;;
st8 [sp] = r32
CALLSYS_ERROR(break)
ld8 r15 = [sp]
add r14 = @ltoff(curbrk), gp
;;
ld8 r14 = [r14]
;;
ld8 ret0 = [r14]
st8 [r14] = in0
st8 [r14] = r15
br.ret.sptk.few rp
END(sbrk)

View File

@ -148,7 +148,7 @@ _kvm_initvtop(kvm_t *kd)
* addresses/values.
*/
nlist[0].n_name = "kptdir";
nlist[0].n_name = "ia64_kptdir";
nlist[1].n_name = 0;
if (kvm_nlist(kd, nlist) != 0) {

View File

@ -32,6 +32,8 @@ S= ../../..
.endif
.include "$S/conf/kern.pre.mk"
INCLUDES+= -I$S/contrib/ia64/libuwx/src
CFLAGS+= -mconstant-gp
ASM_CFLAGS= -x assembler-with-cpp -Wa,-x -DLOCORE ${CFLAGS}

View File

@ -18,6 +18,19 @@ atkbdmap.h optional atkbd_dflt_keymap \
no-obj no-implicit-rule before-depend \
clean "atkbdmap.h"
#
contrib/ia64/libuwx/src/uwx_bstream.c standard
contrib/ia64/libuwx/src/uwx_context.c standard
contrib/ia64/libuwx/src/uwx_env.c standard
contrib/ia64/libuwx/src/uwx_scoreboard.c standard
#contrib/ia64/libuwx/src/uwx_self.c standard
#contrib/ia64/libuwx/src/uwx_self_context.s standard
contrib/ia64/libuwx/src/uwx_step.c standard
contrib/ia64/libuwx/src/uwx_str.c standard
contrib/ia64/libuwx/src/uwx_swap.c standard
contrib/ia64/libuwx/src/uwx_trace.c standard
#contrib/ia64/libuwx/src/uwx_ttrace.c standard
contrib/ia64/libuwx/src/uwx_uinfo.c standard
contrib/ia64/libuwx/src/uwx_utable.c standard
crypto/blowfish/bf_enc.c optional crypto
crypto/blowfish/bf_enc.c optional ipsec ipsec_esp
crypto/des/des_enc.c optional crypto
@ -62,6 +75,7 @@ ia64/ia64/autoconf.c standard
ia64/ia64/busdma_machdep.c standard
ia64/ia64/clock.c standard
ia64/ia64/clock_if.m standard
ia64/ia64/context.s standard
ia64/ia64/critical.c standard
ia64/ia64/db_disasm.c optional ddb
ia64/ia64/db_interface.c optional ddb
@ -88,8 +102,8 @@ ia64/ia64/setjmp.s standard
ia64/ia64/ssc.c optional ski
ia64/ia64/sscdisk.c optional ski
ia64/ia64/support.s standard
ia64/ia64/swtch.s standard
ia64/ia64/sys_machdep.c standard
ia64/ia64/syscall.s standard
ia64/ia64/trap.c standard
ia64/ia64/unaligned.c standard
ia64/ia64/unwind.c standard

View File

@ -4,7 +4,7 @@
ITANIUM opt_global.h
ITANIUM2 opt_global.h
IA32
IA32 opt_global.h
PAGE_SIZE_4K opt_global.h
PAGE_SIZE_8K opt_global.h

View File

@ -52,6 +52,10 @@
#include <sys/vnode.h>
#include <sys/imgact_elf.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
@ -64,8 +68,6 @@
#include <i386/include/psl.h>
#include <i386/include/segments.h>
#include <i386/include/specialreg.h>
#include <machine/frame.h>
#include <machine/md_var.h>
static register_t *ia32_copyout_strings(struct image_params *imgp);
static void ia32_setregs(struct thread *td, u_long entry, u_long stack,
@ -84,9 +86,9 @@ static char ia32_sigcode[] = {
0x50, /* pushl %eax */
0xcd, 0x80, /* int $0x80 */
0xeb, 0xfe, /* 0: jmp 0b */
0, 0, 0, 0
0
};
static int ia32_szsigcode = sizeof(ia32_sigcode) & ~3;
static int ia32_szsigcode = sizeof(ia32_sigcode);
struct sysentvec ia32_freebsd_sysvec = {
SYS_MAXSYSCALL,
@ -105,8 +107,8 @@ struct sysentvec ia32_freebsd_sysvec = {
"FreeBSD ELF",
elf32_coredump,
NULL,
MINSIGSTKSZ,
4096,
IA32_MINSIGSTKSZ,
IA32_PAGE_SIZE,
0,
IA32_USRSTACK,
IA32_USRSTACK,
@ -145,8 +147,8 @@ ia32_copyout_strings(struct image_params *imgp)
*/
arginfo = (struct ia32_ps_strings *)IA32_PS_STRINGS;
szsigcode = *(imgp->proc->p_sysent->sv_szsigcode);
destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
destp = (caddr_t)arginfo - szsigcode - IA32_USRSPACE -
roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
/*
* install sigcode
@ -185,6 +187,7 @@ ia32_copyout_strings(struct image_params *imgp)
/*
* vectp also becomes our initial stack base
*/
vectp = (void*)((uintptr_t)vectp & ~15);
stack_base = vectp;
stringp = imgp->stringbase;
@ -237,60 +240,45 @@ ia32_copyout_strings(struct image_params *imgp)
static void
ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
struct trapframe *frame = td->td_frame;
struct trapframe *tf = td->td_frame;
vm_offset_t gdt, ldt;
u_int64_t codesel, datasel, ldtsel;
u_int64_t codeseg, dataseg, gdtseg, ldtseg;
struct segment_descriptor desc;
struct vmspace *vmspace = td->td_proc->p_vmspace;
/*
* Make sure that we restore the entire trapframe after an
* execve.
*/
frame->tf_flags &= ~FRAME_SYSCALL;
exec_setregs(td, entry, stack, ps_strings);
bzero(frame->tf_r, sizeof(frame->tf_r));
bzero(frame->tf_f, sizeof(frame->tf_f));
/* Non-syscall frames are cleared by exec_setregs() */
if (tf->tf_flags & FRAME_SYSCALL) {
bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
} else
tf->tf_special.ndirty = 0;
frame->tf_cr_iip = entry;
frame->tf_cr_ipsr = (IA64_PSR_IC
| IA64_PSR_I
| IA64_PSR_IT
| IA64_PSR_DT
| IA64_PSR_RT
| IA64_PSR_DFH
| IA64_PSR_IS
| IA64_PSR_BN
| IA64_PSR_CPL_USER);
frame->tf_r[FRAME_R12] = stack;
tf->tf_special.psr |= IA64_PSR_IS;
tf->tf_special.sp = stack;
/* Point the RSE backstore to something harmless. */
tf->tf_special.bspstore = (IA32_PS_STRINGS - ia32_szsigcode -
IA32_USRSPACE + 15) & ~15;
codesel = LSEL(LUCODE_SEL, SEL_UPL);
datasel = LSEL(LUDATA_SEL, SEL_UPL);
ldtsel = GSEL(GLDT_SEL, SEL_UPL);
#if 1
frame->tf_r[FRAME_R16] = (datasel << 48) | (datasel << 32)
| (datasel << 16) | datasel;
frame->tf_r[FRAME_R17] = (ldtsel << 32) | (datasel << 16) | codesel;
#else
frame->tf_r[FRAME_R16] = datasel;
frame->tf_r[FRAME_R17] = codesel;
frame->tf_r[FRAME_R18] = datasel;
frame->tf_r[FRAME_R19] = datasel;
frame->tf_r[FRAME_R20] = datasel;
frame->tf_r[FRAME_R21] = datasel;
frame->tf_r[FRAME_R22] = ldtsel;
#endif
/* Setup ia32 segment registers. */
tf->tf_scratch.gr16 = (datasel << 48) | (datasel << 32) |
(datasel << 16) | datasel;
tf->tf_scratch.gr17 = (ldtsel << 32) | (datasel << 16) | codesel;
/*
* Build the GDT and LDT.
*/
gdt = IA32_USRSTACK;
vm_map_find(&vmspace->vm_map, 0, 0,
&gdt, PAGE_SIZE, 0,
VM_PROT_ALL, VM_PROT_ALL, 0);
ldt = gdt + 4096;
vm_map_find(&vmspace->vm_map, 0, 0, &gdt, IA32_PAGE_SIZE << 1, 0,
VM_PROT_ALL, VM_PROT_ALL, 0);
ldt = gdt + IA32_PAGE_SIZE;
desc.sd_lolimit = 8*NLDT-1;
desc.sd_lobase = ldt & 0xffffff;
@ -330,12 +318,13 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+ (1L << 59) /* present */
+ (1L << 62) /* 32 bits */
+ (1L << 63); /* page granularity */
ia64_set_csd(codeseg);
ia64_set_ssd(dataseg);
frame->tf_r[FRAME_R24] = dataseg; /* ESD */
frame->tf_r[FRAME_R27] = dataseg; /* DSD */
frame->tf_r[FRAME_R28] = dataseg; /* FSD */
frame->tf_r[FRAME_R29] = dataseg; /* GSD */
tf->tf_scratch.csd = codeseg;
tf->tf_scratch.ssd = dataseg;
tf->tf_scratch.gr24 = dataseg; /* ESD */
tf->tf_scratch.gr27 = dataseg; /* DSD */
tf->tf_scratch.gr28 = dataseg; /* FSD */
tf->tf_scratch.gr29 = dataseg; /* GSD */
gdtseg = gdt /* base */
+ ((8L*NGDT - 1) << 32) /* limit */
@ -351,13 +340,16 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+ (1L << 59) /* present */
+ (0L << 62) /* 16 bits */
+ (0L << 63); /* byte granularity */
frame->tf_r[FRAME_R30] = ldtseg; /* LDTD */
frame->tf_r[FRAME_R31] = gdtseg; /* GDTD */
tf->tf_scratch.gr30 = ldtseg; /* LDTD */
tf->tf_scratch.gr31 = gdtseg; /* GDTD */
/* Set ia32 control registers on this processor. */
ia64_set_cflg(CR0_PE | CR0_PG | ((long)(CR4_XMM | CR4_FXSR) << 32));
ia64_set_eflag(PSL_USER);
/* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
frame->tf_r[FRAME_R11] = IA32_PS_STRINGS;
tf->tf_scratch.gr11 = IA32_PS_STRINGS;
/*
* XXX - Linux emulator
@ -366,3 +358,27 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
*/
td->td_retval[1] = 0;
}
/*
 * Restore a thread's ia32 (i386 compatibility) machine state from its
 * PCB onto the current processor.  Counterpart of ia32_savectx(); the
 * fields written here are the ones that function captures.
 */
void
ia32_restorectx(struct pcb *pcb)
{
ia64_set_cflg(pcb->pcb_ia32_cflg);	/* ia32 control flags (holds CR0/CR4 bits; see ia32_setregs()) */
ia64_set_eflag(pcb->pcb_ia32_eflag);	/* ia32 EFLAGS image */
ia64_set_fcr(pcb->pcb_ia32_fcr);	/* FP control — presumably x87/SSE control word; TODO confirm */
ia64_set_fdr(pcb->pcb_ia32_fdr);	/* FP data-pointer register — assumed; verify against SDM */
ia64_set_fir(pcb->pcb_ia32_fir);	/* FP instruction-pointer register — assumed; verify against SDM */
ia64_set_fsr(pcb->pcb_ia32_fsr);	/* FP status register */
}
/*
 * Save the current processor's ia32 (i386 compatibility) machine state
 * into a thread's PCB so it can later be re-established with
 * ia32_restorectx().
 */
void
ia32_savectx(struct pcb *pcb)
{
pcb->pcb_ia32_cflg = ia64_get_cflg();	/* ia32 control flags (CR0/CR4 image) */
pcb->pcb_ia32_eflag = ia64_get_eflag();	/* ia32 EFLAGS image */
pcb->pcb_ia32_fcr = ia64_get_fcr();	/* FP control — presumably x87/SSE control word; TODO confirm */
pcb->pcb_ia32_fdr = ia64_get_fdr();	/* FP data-pointer register — assumed; verify against SDM */
pcb->pcb_ia32_fir = ia64_get_fir();	/* FP instruction-pointer register — assumed; verify against SDM */
pcb->pcb_ia32_fsr = ia64_get_fsr();	/* FP status register */
}

View File

@ -52,6 +52,10 @@
#include <sys/vnode.h>
#include <sys/imgact_elf.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
@ -64,8 +68,6 @@
#include <i386/include/psl.h>
#include <i386/include/segments.h>
#include <i386/include/specialreg.h>
#include <machine/frame.h>
#include <machine/md_var.h>
static register_t *ia32_copyout_strings(struct image_params *imgp);
static void ia32_setregs(struct thread *td, u_long entry, u_long stack,
@ -84,9 +86,9 @@ static char ia32_sigcode[] = {
0x50, /* pushl %eax */
0xcd, 0x80, /* int $0x80 */
0xeb, 0xfe, /* 0: jmp 0b */
0, 0, 0, 0
0
};
static int ia32_szsigcode = sizeof(ia32_sigcode) & ~3;
static int ia32_szsigcode = sizeof(ia32_sigcode);
struct sysentvec ia32_freebsd_sysvec = {
SYS_MAXSYSCALL,
@ -105,8 +107,8 @@ struct sysentvec ia32_freebsd_sysvec = {
"FreeBSD ELF",
elf32_coredump,
NULL,
MINSIGSTKSZ,
4096,
IA32_MINSIGSTKSZ,
IA32_PAGE_SIZE,
0,
IA32_USRSTACK,
IA32_USRSTACK,
@ -145,8 +147,8 @@ ia32_copyout_strings(struct image_params *imgp)
*/
arginfo = (struct ia32_ps_strings *)IA32_PS_STRINGS;
szsigcode = *(imgp->proc->p_sysent->sv_szsigcode);
destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
destp = (caddr_t)arginfo - szsigcode - IA32_USRSPACE -
roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
/*
* install sigcode
@ -185,6 +187,7 @@ ia32_copyout_strings(struct image_params *imgp)
/*
* vectp also becomes our initial stack base
*/
vectp = (void*)((uintptr_t)vectp & ~15);
stack_base = vectp;
stringp = imgp->stringbase;
@ -237,60 +240,45 @@ ia32_copyout_strings(struct image_params *imgp)
static void
ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
struct trapframe *frame = td->td_frame;
struct trapframe *tf = td->td_frame;
vm_offset_t gdt, ldt;
u_int64_t codesel, datasel, ldtsel;
u_int64_t codeseg, dataseg, gdtseg, ldtseg;
struct segment_descriptor desc;
struct vmspace *vmspace = td->td_proc->p_vmspace;
/*
* Make sure that we restore the entire trapframe after an
* execve.
*/
frame->tf_flags &= ~FRAME_SYSCALL;
exec_setregs(td, entry, stack, ps_strings);
bzero(frame->tf_r, sizeof(frame->tf_r));
bzero(frame->tf_f, sizeof(frame->tf_f));
/* Non-syscall frames are cleared by exec_setregs() */
if (tf->tf_flags & FRAME_SYSCALL) {
bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
} else
tf->tf_special.ndirty = 0;
frame->tf_cr_iip = entry;
frame->tf_cr_ipsr = (IA64_PSR_IC
| IA64_PSR_I
| IA64_PSR_IT
| IA64_PSR_DT
| IA64_PSR_RT
| IA64_PSR_DFH
| IA64_PSR_IS
| IA64_PSR_BN
| IA64_PSR_CPL_USER);
frame->tf_r[FRAME_R12] = stack;
tf->tf_special.psr |= IA64_PSR_IS;
tf->tf_special.sp = stack;
/* Point the RSE backstore to something harmless. */
tf->tf_special.bspstore = (IA32_PS_STRINGS - ia32_szsigcode -
IA32_USRSPACE + 15) & ~15;
codesel = LSEL(LUCODE_SEL, SEL_UPL);
datasel = LSEL(LUDATA_SEL, SEL_UPL);
ldtsel = GSEL(GLDT_SEL, SEL_UPL);
#if 1
frame->tf_r[FRAME_R16] = (datasel << 48) | (datasel << 32)
| (datasel << 16) | datasel;
frame->tf_r[FRAME_R17] = (ldtsel << 32) | (datasel << 16) | codesel;
#else
frame->tf_r[FRAME_R16] = datasel;
frame->tf_r[FRAME_R17] = codesel;
frame->tf_r[FRAME_R18] = datasel;
frame->tf_r[FRAME_R19] = datasel;
frame->tf_r[FRAME_R20] = datasel;
frame->tf_r[FRAME_R21] = datasel;
frame->tf_r[FRAME_R22] = ldtsel;
#endif
/* Setup ia32 segment registers. */
tf->tf_scratch.gr16 = (datasel << 48) | (datasel << 32) |
(datasel << 16) | datasel;
tf->tf_scratch.gr17 = (ldtsel << 32) | (datasel << 16) | codesel;
/*
* Build the GDT and LDT.
*/
gdt = IA32_USRSTACK;
vm_map_find(&vmspace->vm_map, 0, 0,
&gdt, PAGE_SIZE, 0,
VM_PROT_ALL, VM_PROT_ALL, 0);
ldt = gdt + 4096;
vm_map_find(&vmspace->vm_map, 0, 0, &gdt, IA32_PAGE_SIZE << 1, 0,
VM_PROT_ALL, VM_PROT_ALL, 0);
ldt = gdt + IA32_PAGE_SIZE;
desc.sd_lolimit = 8*NLDT-1;
desc.sd_lobase = ldt & 0xffffff;
@ -330,12 +318,13 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+ (1L << 59) /* present */
+ (1L << 62) /* 32 bits */
+ (1L << 63); /* page granularity */
ia64_set_csd(codeseg);
ia64_set_ssd(dataseg);
frame->tf_r[FRAME_R24] = dataseg; /* ESD */
frame->tf_r[FRAME_R27] = dataseg; /* DSD */
frame->tf_r[FRAME_R28] = dataseg; /* FSD */
frame->tf_r[FRAME_R29] = dataseg; /* GSD */
tf->tf_scratch.csd = codeseg;
tf->tf_scratch.ssd = dataseg;
tf->tf_scratch.gr24 = dataseg; /* ESD */
tf->tf_scratch.gr27 = dataseg; /* DSD */
tf->tf_scratch.gr28 = dataseg; /* FSD */
tf->tf_scratch.gr29 = dataseg; /* GSD */
gdtseg = gdt /* base */
+ ((8L*NGDT - 1) << 32) /* limit */
@ -351,13 +340,16 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
+ (1L << 59) /* present */
+ (0L << 62) /* 16 bits */
+ (0L << 63); /* byte granularity */
frame->tf_r[FRAME_R30] = ldtseg; /* LDTD */
frame->tf_r[FRAME_R31] = gdtseg; /* GDTD */
tf->tf_scratch.gr30 = ldtseg; /* LDTD */
tf->tf_scratch.gr31 = gdtseg; /* GDTD */
/* Set ia32 control registers on this processor. */
ia64_set_cflg(CR0_PE | CR0_PG | ((long)(CR4_XMM | CR4_FXSR) << 32));
ia64_set_eflag(PSL_USER);
/* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
frame->tf_r[FRAME_R11] = IA32_PS_STRINGS;
tf->tf_scratch.gr11 = IA32_PS_STRINGS;
/*
* XXX - Linux emulator
@ -366,3 +358,27 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
*/
td->td_retval[1] = 0;
}
/*
 * Restore a thread's ia32 (i386 compatibility) machine state from its
 * PCB onto the current processor.  Counterpart of ia32_savectx(); the
 * fields written here are the ones that function captures.
 */
void
ia32_restorectx(struct pcb *pcb)
{
ia64_set_cflg(pcb->pcb_ia32_cflg);	/* ia32 control flags (holds CR0/CR4 bits; see ia32_setregs()) */
ia64_set_eflag(pcb->pcb_ia32_eflag);	/* ia32 EFLAGS image */
ia64_set_fcr(pcb->pcb_ia32_fcr);	/* FP control — presumably x87/SSE control word; TODO confirm */
ia64_set_fdr(pcb->pcb_ia32_fdr);	/* FP data-pointer register — assumed; verify against SDM */
ia64_set_fir(pcb->pcb_ia32_fir);	/* FP instruction-pointer register — assumed; verify against SDM */
ia64_set_fsr(pcb->pcb_ia32_fsr);	/* FP status register */
}
/*
 * Save the current processor's ia32 (i386 compatibility) machine state
 * into a thread's PCB so it can later be re-established with
 * ia32_restorectx().
 */
void
ia32_savectx(struct pcb *pcb)
{
pcb->pcb_ia32_cflg = ia64_get_cflg();	/* ia32 control flags (CR0/CR4 image) */
pcb->pcb_ia32_eflag = ia64_get_eflag();	/* ia32 EFLAGS image */
pcb->pcb_ia32_fcr = ia64_get_fcr();	/* FP control — presumably x87/SSE control word; TODO confirm */
pcb->pcb_ia32_fdr = ia64_get_fdr();	/* FP data-pointer register — assumed; verify against SDM */
pcb->pcb_ia32_fir = ia64_get_fir();	/* FP instruction-pointer register — assumed; verify against SDM */
pcb->pcb_ia32_fsr = ia64_get_fsr();	/* FP status register */
}

View File

@ -44,8 +44,11 @@ struct ia32_ps_strings {
int ps_nenvstr; /* the number of environment strings */
};
#define IA32_USRSTACK (4L*1024*1024*1024 - PAGE_SIZE)
#define IA32_PS_STRINGS (IA32_USRSTACK - sizeof(struct ia32_ps_strings))
#define IA32_MINSIGSTKSZ 2048
#define IA32_PAGE_SIZE 4096
#define IA32_USRSTACK (2L*1024*1024*1024 - IA32_PAGE_SIZE*2)
#define IA32_PS_STRINGS (IA32_USRSTACK - sizeof(struct ia32_ps_strings))
#define IA32_USRSPACE IA32_PAGE_SIZE
static __inline caddr_t stackgap_init(void);
static __inline void *stackgap_alloc(caddr_t *, size_t);
@ -54,7 +57,7 @@ static __inline caddr_t
stackgap_init()
{
#define szsigcode (*(curproc->p_sysent->sv_szsigcode))
return (caddr_t)(((caddr_t)IA32_PS_STRINGS) - szsigcode - SPARE_USRSPACE);
return (((caddr_t)IA32_PS_STRINGS) - szsigcode - IA32_USRSPACE);
#undef szsigcode
}

View File

@ -51,7 +51,6 @@
#include <vm/vm.h>
#include <machine/inst.h>
#include <machine/rse.h>
#include <machine/db_machdep.h>
#include <machine/mutex.h>
@ -70,73 +69,61 @@ extern void gdb_handle_exception(db_regs_t *, int);
int db_active;
db_regs_t ddb_regs;
static u_int64_t zero;
static int db_get_rse_reg(struct db_variable *vp, db_expr_t *valuep, int op);
static int db_get_pc_reg(struct db_variable *vp, db_expr_t *valuep, int op);
static int db_get_ip_reg(struct db_variable *vp, db_expr_t *valuep, int op);
struct db_variable db_regs[] = {
/* Misc control/app registers */
#define DB_MISC_REGS 15 /* make sure this is correct */
#define DB_MISC_REGS 13 /* make sure this is correct */
{"pc", (db_expr_t*) 0, db_get_pc_reg},
{"ip", (db_expr_t*) &ddb_regs.tf_cr_iip, FCN_NULL},
{"psr", (db_expr_t*) &ddb_regs.tf_cr_ipsr, FCN_NULL},
{"cr.isr", (db_expr_t*) &ddb_regs.tf_cr_isr, FCN_NULL},
{"cr.ifa", (db_expr_t*) &ddb_regs.tf_cr_ifa, FCN_NULL},
{"pr", (db_expr_t*) &ddb_regs.tf_pr, FCN_NULL},
{"ar.rsc", (db_expr_t*) &ddb_regs.tf_ar_rsc, FCN_NULL},
{"ar.pfs", (db_expr_t*) &ddb_regs.tf_ar_pfs, FCN_NULL},
{"cr.ifs", (db_expr_t*) &ddb_regs.tf_cr_ifs, FCN_NULL},
{"ar.bspstore", (db_expr_t*) &ddb_regs.tf_ar_bspstore, FCN_NULL},
{"ar.rnat", (db_expr_t*) &ddb_regs.tf_ar_rnat, FCN_NULL},
{"ndirty", (db_expr_t*) &ddb_regs.tf_ndirty, FCN_NULL},
{"ar.unat", (db_expr_t*) &ddb_regs.tf_ar_unat, FCN_NULL},
{"ar.ccv", (db_expr_t*) &ddb_regs.tf_ar_ccv, FCN_NULL},
{"ar.fpsr", (db_expr_t*) &ddb_regs.tf_ar_fpsr, FCN_NULL},
{"ip", NULL, db_get_ip_reg},
{"psr", (db_expr_t*) &ddb_regs.tf_special.psr, FCN_NULL},
{"cr.isr", (db_expr_t*) &ddb_regs.tf_special.isr, FCN_NULL},
{"cr.ifa", (db_expr_t*) &ddb_regs.tf_special.ifa, FCN_NULL},
{"pr", (db_expr_t*) &ddb_regs.tf_special.pr, FCN_NULL},
{"ar.rsc", (db_expr_t*) &ddb_regs.tf_special.rsc, FCN_NULL},
{"ar.pfs", (db_expr_t*) &ddb_regs.tf_special.pfs, FCN_NULL},
{"cr.ifs", (db_expr_t*) &ddb_regs.tf_special.cfm, FCN_NULL},
{"ar.bspstore", (db_expr_t*) &ddb_regs.tf_special.bspstore, FCN_NULL},
{"ndirty", (db_expr_t*) &ddb_regs.tf_special.ndirty, FCN_NULL},
{"ar.rnat", (db_expr_t*) &ddb_regs.tf_special.rnat, FCN_NULL},
{"ar.unat", (db_expr_t*) &ddb_regs.tf_special.unat, FCN_NULL},
{"ar.fpsr", (db_expr_t*) &ddb_regs.tf_special.fpsr, FCN_NULL},
/* Branch registers */
{"rp", (db_expr_t*) &ddb_regs.tf_b[0], FCN_NULL},
{"b1", (db_expr_t*) &ddb_regs.tf_b[1], FCN_NULL},
{"b2", (db_expr_t*) &ddb_regs.tf_b[2], FCN_NULL},
{"b3", (db_expr_t*) &ddb_regs.tf_b[3], FCN_NULL},
{"b4", (db_expr_t*) &ddb_regs.tf_b[4], FCN_NULL},
{"b5", (db_expr_t*) &ddb_regs.tf_b[5], FCN_NULL},
{"b6", (db_expr_t*) &ddb_regs.tf_b[6], FCN_NULL},
{"b7", (db_expr_t*) &ddb_regs.tf_b[7], FCN_NULL},
{"rp", (db_expr_t*) &ddb_regs.tf_special.rp, FCN_NULL},
/* b1, b2, b3, b4, b5 are preserved */
{"b6", (db_expr_t*) &ddb_regs.tf_scratch.br6, FCN_NULL},
{"b7", (db_expr_t*) &ddb_regs.tf_scratch.br7, FCN_NULL},
/* Static registers */
{"r0", (db_expr_t*) &zero, FCN_NULL},
{"gp", (db_expr_t*) &ddb_regs.tf_r[FRAME_R1], FCN_NULL},
{"r2", (db_expr_t*) &ddb_regs.tf_r[FRAME_R2], FCN_NULL},
{"r3", (db_expr_t*) &ddb_regs.tf_r[FRAME_R3], FCN_NULL},
{"r4", (db_expr_t*) &ddb_regs.tf_r[FRAME_R4], FCN_NULL},
{"r5", (db_expr_t*) &ddb_regs.tf_r[FRAME_R5], FCN_NULL},
{"r6", (db_expr_t*) &ddb_regs.tf_r[FRAME_R6], FCN_NULL},
{"r7", (db_expr_t*) &ddb_regs.tf_r[FRAME_R7], FCN_NULL},
{"r8", (db_expr_t*) &ddb_regs.tf_r[FRAME_R8], FCN_NULL},
{"r9", (db_expr_t*) &ddb_regs.tf_r[FRAME_R9], FCN_NULL},
{"r10", (db_expr_t*) &ddb_regs.tf_r[FRAME_R10], FCN_NULL},
{"r11", (db_expr_t*) &ddb_regs.tf_r[FRAME_R11], FCN_NULL},
{"sp", (db_expr_t*) &ddb_regs.tf_r[FRAME_R12], FCN_NULL},
{"r13", (db_expr_t*) &ddb_regs.tf_r[FRAME_R13], FCN_NULL},
{"r14", (db_expr_t*) &ddb_regs.tf_r[FRAME_R14], FCN_NULL},
{"r15", (db_expr_t*) &ddb_regs.tf_r[FRAME_R15], FCN_NULL},
{"r16", (db_expr_t*) &ddb_regs.tf_r[FRAME_R16], FCN_NULL},
{"r17", (db_expr_t*) &ddb_regs.tf_r[FRAME_R17], FCN_NULL},
{"r18", (db_expr_t*) &ddb_regs.tf_r[FRAME_R18], FCN_NULL},
{"r19", (db_expr_t*) &ddb_regs.tf_r[FRAME_R19], FCN_NULL},
{"r20", (db_expr_t*) &ddb_regs.tf_r[FRAME_R20], FCN_NULL},
{"r21", (db_expr_t*) &ddb_regs.tf_r[FRAME_R21], FCN_NULL},
{"r22", (db_expr_t*) &ddb_regs.tf_r[FRAME_R22], FCN_NULL},
{"r23", (db_expr_t*) &ddb_regs.tf_r[FRAME_R23], FCN_NULL},
{"r24", (db_expr_t*) &ddb_regs.tf_r[FRAME_R24], FCN_NULL},
{"r25", (db_expr_t*) &ddb_regs.tf_r[FRAME_R25], FCN_NULL},
{"r26", (db_expr_t*) &ddb_regs.tf_r[FRAME_R26], FCN_NULL},
{"r27", (db_expr_t*) &ddb_regs.tf_r[FRAME_R27], FCN_NULL},
{"r28", (db_expr_t*) &ddb_regs.tf_r[FRAME_R28], FCN_NULL},
{"r29", (db_expr_t*) &ddb_regs.tf_r[FRAME_R29], FCN_NULL},
{"r30", (db_expr_t*) &ddb_regs.tf_r[FRAME_R30], FCN_NULL},
{"r31", (db_expr_t*) &ddb_regs.tf_r[FRAME_R31], FCN_NULL},
{"gp", (db_expr_t*) &ddb_regs.tf_special.gp, FCN_NULL},
{"r2", (db_expr_t*) &ddb_regs.tf_scratch.gr2, FCN_NULL},
{"r3", (db_expr_t*) &ddb_regs.tf_scratch.gr3, FCN_NULL},
{"r8", (db_expr_t*) &ddb_regs.tf_scratch.gr8, FCN_NULL},
{"r9", (db_expr_t*) &ddb_regs.tf_scratch.gr9, FCN_NULL},
{"r10", (db_expr_t*) &ddb_regs.tf_scratch.gr10, FCN_NULL},
{"r11", (db_expr_t*) &ddb_regs.tf_scratch.gr11, FCN_NULL},
{"sp", (db_expr_t*) &ddb_regs.tf_special.sp, FCN_NULL},
{"tp", (db_expr_t*) &ddb_regs.tf_special.tp, FCN_NULL},
{"r14", (db_expr_t*) &ddb_regs.tf_scratch.gr14, FCN_NULL},
{"r15", (db_expr_t*) &ddb_regs.tf_scratch.gr15, FCN_NULL},
{"r16", (db_expr_t*) &ddb_regs.tf_scratch.gr16, FCN_NULL},
{"r17", (db_expr_t*) &ddb_regs.tf_scratch.gr17, FCN_NULL},
{"r18", (db_expr_t*) &ddb_regs.tf_scratch.gr18, FCN_NULL},
{"r19", (db_expr_t*) &ddb_regs.tf_scratch.gr19, FCN_NULL},
{"r20", (db_expr_t*) &ddb_regs.tf_scratch.gr20, FCN_NULL},
{"r21", (db_expr_t*) &ddb_regs.tf_scratch.gr21, FCN_NULL},
{"r22", (db_expr_t*) &ddb_regs.tf_scratch.gr22, FCN_NULL},
{"r23", (db_expr_t*) &ddb_regs.tf_scratch.gr23, FCN_NULL},
{"r24", (db_expr_t*) &ddb_regs.tf_scratch.gr24, FCN_NULL},
{"r25", (db_expr_t*) &ddb_regs.tf_scratch.gr25, FCN_NULL},
{"r26", (db_expr_t*) &ddb_regs.tf_scratch.gr26, FCN_NULL},
{"r27", (db_expr_t*) &ddb_regs.tf_scratch.gr27, FCN_NULL},
{"r28", (db_expr_t*) &ddb_regs.tf_scratch.gr28, FCN_NULL},
{"r29", (db_expr_t*) &ddb_regs.tf_scratch.gr29, FCN_NULL},
{"r30", (db_expr_t*) &ddb_regs.tf_scratch.gr30, FCN_NULL},
{"r31", (db_expr_t*) &ddb_regs.tf_scratch.gr31, FCN_NULL},
/* Stacked registers */
{"r32", (db_expr_t*) 32, db_get_rse_reg},
@ -241,28 +228,32 @@ struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
static int
db_get_rse_reg(struct db_variable *vp, db_expr_t *valuep, int op)
{
int sof = ddb_regs.tf_cr_ifs & 0x7f;
int regno = (db_expr_t) vp->valuep;
u_int64_t *bsp = (u_int64_t *) (ddb_regs.tf_ar_bspstore + ddb_regs.tf_ndirty);
u_int64_t *reg;
uint64_t bsp;
int nats, regno, sof;
if (regno - 32 >= sof) {
if (op == DB_VAR_GET)
*valuep = 0xdeadbeefdeadbeef;
} else {
bsp = ia64_rse_previous_frame(bsp, sof);
reg = ia64_rse_register_address(bsp, regno);
bsp = ddb_regs.tf_special.bspstore + ddb_regs.tf_special.ndirty;
regno = (db_expr_t)vp->valuep - 32;
sof = (int)(ddb_regs.tf_special.cfm & 0x7f);
nats = (sof - regno + 63 - ((int)(bsp >> 3) & 0x3f)) / 63;
reg = (void*)(bsp - ((sof - regno + nats) << 3));
if (regno < sof) {
if (op == DB_VAR_GET)
*valuep = *reg;
else
*reg = *valuep;
} else {
if (op == DB_VAR_GET)
*valuep = 0xdeadbeefdeadbeef;
}
return 0;
return (0);
}
static int
db_get_pc_reg(struct db_variable *vp, db_expr_t *valuep, int op)
db_get_ip_reg(struct db_variable *vp, db_expr_t *valuep, int op)
{
/* Read only */
if (op == DB_VAR_GET)
@ -353,8 +344,8 @@ kdb_trap(int vector, struct trapframe *regs)
/*
* XXX pretend that registers outside the current frame don't exist.
*/
db_eregs = db_regs + DB_MISC_REGS + 8 + 32
+ (ddb_regs.tf_cr_ifs & 0x7f);
db_eregs = db_regs + DB_MISC_REGS + 3 + 27 +
(ddb_regs.tf_special.cfm & 0x7f);
__asm __volatile("flushrs"); /* so we can look at them */
@ -412,13 +403,13 @@ db_read_bytes(addr, size, data)
register size_t size;
register char *data;
{
register char *src;
db_nofault = &db_jmpbuf;
src = (char *)addr;
while (size-- > 0)
*data++ = *src++;
if (addr < VM_MAX_ADDRESS)
copyin((char *)addr, data, size);
else
bcopy((char *)addr, data, size);
db_nofault = 0;
}
@ -432,13 +423,13 @@ db_write_bytes(addr, size, data)
register size_t size;
register char *data;
{
register char *dst;
db_nofault = &db_jmpbuf;
dst = (char *)addr;
while (size-- > 0)
*dst++ = *data++;
if (addr < VM_MAX_ADDRESS)
copyout(data, (char *)addr, size);
else
bcopy(data, (char *)addr, size);
db_nofault = 0;
}
@ -455,30 +446,36 @@ db_register_value(regs, regno)
db_regs_t *regs;
int regno;
{
if (regno > 127 || regno < 0) {
db_printf(" **** STRANGE REGISTER NUMBER %d **** ", regno);
return (0);
}
uint64_t *rsp;
uint64_t bsp;
int nats, sof;
if (regno == 0)
return (0);
if (regno == 1)
return (regs->tf_special.gp);
if (regno >= 2 && regno <= 3)
return ((&regs->tf_scratch.gr2)[regno - 2]);
if (regno >= 8 && regno <= 11)
return ((&regs->tf_scratch.gr8)[regno - 8]);
if (regno == 12)
return (regs->tf_special.sp);
if (regno == 13)
return (regs->tf_special.tp);
if (regno >= 14 && regno <= 31)
return ((&regs->tf_scratch.gr14)[regno - 14]);
if (regno < 32) {
return (regs->tf_r[regno - 1]);
} else {
int sof = ddb_regs.tf_cr_ifs & 0x7f;
u_int64_t *bsp = (u_int64_t *) (ddb_regs.tf_ar_bspstore + ddb_regs.tf_ndirty);
u_int64_t *reg;
if (regno - 32 >= sof) {
return 0xdeadbeefdeadbeef;
} else {
bsp = ia64_rse_previous_frame(bsp, sof);
reg = ia64_rse_register_address(bsp, regno);
return *reg;
}
sof = (int)(regs->tf_special.cfm & 0x7f);
if (regno >= 32 && regno < sof + 32) {
bsp = regs->tf_special.bspstore + regs->tf_special.ndirty;
regno -= 32;
nats = (sof - regno + 63 - ((int)(bsp >> 3) & 0x3f)) / 63;
rsp = (void*)(bsp - ((sof - regno + nats) << 3));
return (*rsp);
}
db_printf(" **** STRANGE REGISTER NUMBER %d **** ", regno);
return (0);
}
void
@ -539,10 +536,10 @@ db_skip_breakpoint(void)
/*
* Skip past the break instruction.
*/
ddb_regs.tf_cr_ipsr += IA64_PSR_RI_1;
if ((ddb_regs.tf_cr_ipsr & IA64_PSR_RI) > IA64_PSR_RI_2) {
ddb_regs.tf_cr_ipsr &= ~IA64_PSR_RI;
ddb_regs.tf_cr_iip += 16;
ddb_regs.tf_special.psr += IA64_PSR_RI_1;
if ((ddb_regs.tf_special.psr & IA64_PSR_RI) > IA64_PSR_RI_2) {
ddb_regs.tf_special.psr &= ~IA64_PSR_RI;
ddb_regs.tf_special.iip += 16;
}
}

View File

@ -31,7 +31,6 @@
#include <machine/inst.h>
#include <machine/db_machdep.h>
#include <machine/unwind.h>
#include <machine/rse.h>
#include <machine/vmparam.h>
#include <ddb/ddb.h>
@ -45,74 +44,61 @@ int db_md_set_watchpoint(db_expr_t addr, db_expr_t size);
int db_md_clr_watchpoint(db_expr_t addr, db_expr_t size);
void db_md_list_watchpoints(void);
extern char ia64_vector_table[], do_syscall[], do_syscall_end[];
void
db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count, char *modif)
db_stack_trace_cmd(db_expr_t addr, boolean_t have_addr, db_expr_t count,
char *modif)
{
struct ia64_unwind_state *us;
struct unw_regstate rs;
const char *name;
db_expr_t offset;
uint64_t bsp, cfm, ip, pfs, reg;
c_db_sym_t sym;
int args, error, i;
if (count == -1)
count = 65535;
error = unw_create(&rs, &ddb_regs);
while (!error && count--) {
error = unw_get_cfm(&rs, &cfm);
if (!error)
error = unw_get_bsp(&rs, &bsp);
if (!error)
error = unw_get_ip(&rs, &ip);
if (error)
break;
if (!have_addr) {
us = ia64_create_unwind_state(&ddb_regs);
} else {
return; /* XXX */
}
args = (cfm >> 7) & 0x7f;
if (args > 8)
args = 8;
if (!us) {
db_printf("db_stack_trace_cmd: can't create unwind state\n");
return;
}
while (count--) {
const char * name;
db_expr_t ip;
db_expr_t offset;
c_db_sym_t sym;
int cfm, sof, sol, nargs, i;
u_int64_t *bsp;
u_int64_t *p, reg;
ip = ia64_unwind_state_get_ip(us);
cfm = ia64_unwind_state_get_cfm(us);
bsp = ia64_unwind_state_get_bsp(us);
sof = cfm & 0x7f;
sol = (cfm >> 7) & 0x7f;
error = unw_step(&rs);
if (!error) {
error = unw_get_cfm(&rs, &pfs);
if (!error) {
i = (pfs & 0x7f) - ((pfs >> 7) & 0x7f);
if (args > i)
args = i;
}
}
sym = db_search_symbol(ip, DB_STGY_ANY, &offset);
db_symbol_values(sym, &name, NULL);
db_printf("%s(", name);
nargs = sof - sol;
if (nargs > 8)
nargs = 8;
if (bsp >= (u_int64_t *)IA64_RR_BASE(5)) {
for (i = 0; i < nargs; i++) {
p = ia64_rse_register_address(bsp, 32 + i);
db_read_bytes((vm_offset_t) p, sizeof(reg),
(caddr_t) &reg);
if (bsp >= IA64_RR_BASE(5)) {
for (i = 0; i < args; i++) {
if ((bsp & 0x1ff) == 0x1f8)
bsp += 8;
db_read_bytes(bsp, sizeof(reg), (void*)&reg);
if (i > 0)
db_printf(", ");
db_printf("0x%lx", reg);
bsp += 8;
}
}
} else
db_printf("...");
db_printf(") at ");
db_printsym(ip, DB_STGY_PROC);
db_printf("\n");
if (ia64_unwind_state_previous_frame(us))
break;
ip = ia64_unwind_state_get_ip(us);
if (!ip)
break;
}
ia64_free_unwind_state(us);
}
void
@ -143,4 +129,3 @@ db_md_list_watchpoints()
{
return;
}

View File

@ -59,8 +59,8 @@ struct sysentvec elf64_freebsd_sysvec = {
NULL,
__elfN(freebsd_fixup),
sendsig,
sigcode,
&szsigcode,
NULL, /* sigcode */
NULL, /* &szsigcode */
NULL,
"FreeBSD ELF64",
__elfN(coredump),
@ -262,7 +262,7 @@ elf_cpu_load_file(linker_file_t lf)
if (ph->p_type == PT_IA_64_UNWIND) {
vaddr = ph->p_vaddr + reloc;
ia64_add_unwind_table((vm_offset_t)lf->address, vaddr,
unw_table_add((vm_offset_t)lf->address, vaddr,
vaddr + ph->p_memsz);
}
++ph;
@ -275,6 +275,6 @@ int
elf_cpu_unload_file(linker_file_t lf)
{
ia64_delete_unwind_table((vm_offset_t)lf->address);
unw_table_remove((vm_offset_t)lf->address);
return (0);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -37,8 +37,6 @@
* $FreeBSD$
*/
#include "opt_ia32.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/assym.h>
@ -65,87 +63,57 @@
#include <net/if.h>
#include <netinet/in.h>
#ifdef IA32
ASSYM(IA32, IA32);
#endif
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread));
ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread));
ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(PC_CURRENT_PMAP, offsetof(struct pcpu, pc_current_pmap));
ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
ASSYM(MTX_UNOWNED, MTX_UNOWNED);
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
ASSYM(FRAME_SYSCALL, FRAME_SYSCALL);
ASSYM(TF_CR_IPSR, offsetof(struct trapframe, tf_cr_ipsr));
ASSYM(TF_CR_IFS, offsetof(struct trapframe, tf_cr_ifs));
ASSYM(TF_NDIRTY, offsetof(struct trapframe, tf_ndirty));
ASSYM(TF_AR_FPSR, offsetof(struct trapframe, tf_ar_fpsr));
ASSYM(TF_B, offsetof(struct trapframe, tf_b));
ASSYM(TF_R, offsetof(struct trapframe, tf_r));
ASSYM(TF_R_R1, offsetof(struct trapframe, tf_r[FRAME_R1]));
ASSYM(TF_R_R2, offsetof(struct trapframe, tf_r[FRAME_R2]));
ASSYM(TF_R_R3, offsetof(struct trapframe, tf_r[FRAME_R3]));
ASSYM(TF_R_R4, offsetof(struct trapframe, tf_r[FRAME_R4]));
ASSYM(TF_R_R5, offsetof(struct trapframe, tf_r[FRAME_R5]));
ASSYM(TF_R_R6, offsetof(struct trapframe, tf_r[FRAME_R6]));
ASSYM(TF_R_R7, offsetof(struct trapframe, tf_r[FRAME_R7]));
ASSYM(TF_R_R8, offsetof(struct trapframe, tf_r[FRAME_R8]));
ASSYM(TF_R_R9, offsetof(struct trapframe, tf_r[FRAME_R9]));
ASSYM(TF_R_R10, offsetof(struct trapframe, tf_r[FRAME_R10]));
ASSYM(TF_R_R11, offsetof(struct trapframe, tf_r[FRAME_R11]));
ASSYM(TF_R_SP, offsetof(struct trapframe, tf_r[FRAME_SP]));
ASSYM(TF_R_R13, offsetof(struct trapframe, tf_r[FRAME_R13]));
ASSYM(TF_R_R14, offsetof(struct trapframe, tf_r[FRAME_R14]));
ASSYM(TF_R_R15, offsetof(struct trapframe, tf_r[FRAME_R15]));
ASSYM(TF_F, offsetof(struct trapframe, tf_f));
ASSYM(PCB_CURRENT_PMAP, offsetof(struct pcb, pcb_current_pmap));
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_RP, offsetof(struct pcb, pcb_rp));
ASSYM(PCB_UNAT47, offsetof(struct pcb, pcb_unat47));
ASSYM(UC_MCONTEXT_MC_AR_BSP, offsetof(ucontext_t, uc_mcontext.mc_ar_bsp));
ASSYM(UC_MCONTEXT_MC_AR_RNAT, offsetof(ucontext_t, uc_mcontext.mc_ar_rnat));
ASSYM(EFAULT, EFAULT);
ASSYM(ENAMETOOLONG, ENAMETOOLONG);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_SIZE, PAGE_SIZE);
ASSYM(KSTACK_PAGES, KSTACK_PAGES);
ASSYM(SIZEOF_TRAPFRAME, sizeof(struct trapframe));
ASSYM(SIZEOF_PCB, sizeof(struct pcb));
ASSYM(DT_NULL, DT_NULL);
ASSYM(DT_RELA, DT_RELA);
ASSYM(DT_RELAENT, DT_RELAENT);
ASSYM(DT_RELASZ, DT_RELASZ);
ASSYM(DT_SYMTAB, DT_SYMTAB);
ASSYM(DT_SYMENT, DT_SYMENT);
ASSYM(DT_RELAENT, DT_RELAENT);
ASSYM(R_IA64_NONE, R_IA64_NONE);
ASSYM(EFAULT, EFAULT);
ASSYM(ENAMETOOLONG, ENAMETOOLONG);
ASSYM(ERESTART, ERESTART);
ASSYM(FRAME_SYSCALL, FRAME_SYSCALL);
ASSYM(KSTACK_PAGES, KSTACK_PAGES);
ASSYM(MC_PRESERVED, offsetof(mcontext_t, mc_preserved));
ASSYM(MC_PRESERVED_FP, offsetof(mcontext_t, mc_preserved_fp));
ASSYM(MC_SPECIAL, offsetof(mcontext_t, mc_special));
ASSYM(MC_SPECIAL_BSPSTORE, offsetof(mcontext_t, mc_special.bspstore));
ASSYM(MC_SPECIAL_RNAT, offsetof(mcontext_t, mc_special.rnat));
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_SIZE, PAGE_SIZE);
ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
ASSYM(PC_CURRENT_PMAP, offsetof(struct pcpu, pc_current_pmap));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread));
ASSYM(PCB_CURRENT_PMAP, offsetof(struct pcb, pcb_current_pmap));
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_SPECIAL_RP, offsetof(struct pcb, pcb_special.rp));
ASSYM(R_IA64_DIR64LSB, R_IA64_DIR64LSB);
ASSYM(R_IA64_FPTR64LSB, R_IA64_FPTR64LSB);
ASSYM(R_IA64_NONE, R_IA64_NONE);
ASSYM(R_IA64_REL64LSB, R_IA64_REL64LSB);
ASSYM(PAL_PTCE_INFO, PAL_PTCE_INFO);
ASSYM(PAL_FREQ_RATIOS, PAL_FREQ_RATIOS);
ASSYM(PAL_VM_SUMMARY, PAL_VM_SUMMARY);
ASSYM(SIZEOF_PCB, sizeof(struct pcb));
ASSYM(SIZEOF_SPECIAL, sizeof(struct _special));
ASSYM(SIZEOF_TRAPFRAME, sizeof(struct trapframe));
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(TF_SPECIAL_NDIRTY, offsetof(struct trapframe, tf_special.ndirty));
ASSYM(UC_MCONTEXT, offsetof(ucontext_t, uc_mcontext));
ASSYM(VM_MAX_ADDRESS, VM_MAX_ADDRESS);

View File

@ -152,6 +152,9 @@ interrupt(u_int64_t vector, struct trapframe *framep)
} else if (vector == ipi_vector[IPI_AST]) {
asts[PCPU_GET(cpuid)]++;
CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
} else if (vector == ipi_vector[IPI_HIGH_FP]) {
if (PCPU_GET(fpcurthread) != NULL)
ia64_highfp_save(PCPU_GET(fpcurthread));
} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
rdvs[PCPU_GET(cpuid)]++;
CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));

View File

@ -25,32 +25,6 @@
*
* $FreeBSD$
*/
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
#include <machine/ia64_cpu.h>
@ -77,22 +51,33 @@ kstack: .space KSTACK_PAGES * PAGE_SIZE
* register r8.
*/
ENTRY(__start, 1)
{ .mlx
mov ar.rsc=0
movl r16=ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva=r16
movl r16=kstack
;;
}
{ .mmi
srlz.i
;;
ssm IA64_PSR_DFH
mov r17=KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
}
{ .mlx
add sp=r16,r17 // proc0's stack
movl gp=__gp // find kernel globals
;;
}
{ .mlx
mov ar.bspstore=r16 // switch backing store
movl r16=pa_bootinfo
;;
}
st8 [r16]=r8 // save the PA of the bootinfo block
loadrs // invalidate regs
;;
@ -117,7 +102,7 @@ ENTRY(__start, 1)
;;
ld8 out0=[out0]
;;
add r16=PCB_RP,out0 // return to mi_startup_trampoline
add r16=PCB_SPECIAL_RP,out0 // return to mi_startup_trampoline
movl r17=mi_startup_trampoline
;;
st8 [r16]=r17
@ -133,7 +118,7 @@ ENTRY(mi_startup_trampoline, 0)
.prologue
.save rp,r0
.body
br.call.sptk.many rp=mi_startup
// Should never happen
@ -141,6 +126,59 @@ ENTRY(mi_startup_trampoline, 0)
END(mi_startup_trampoline)
/*
* fork_trampoline()
*
* Arrange for a function to be invoked neatly, after a cpu_switch().
*
* Invokes fork_exit() passing in three arguments: a callout function, an
* argument to the callout, and a trapframe pointer. For child processes
* returning from fork(2), the argument is a pointer to the child process.
*
* The callout function and its argument is in the trapframe in scratch
* registers r2 and r3.
*/
ENTRY(fork_trampoline, 0)
.prologue
.save rp,r0
.body
{ .mmi
alloc r14=ar.pfs,0,0,3,0
add r15=32+SIZEOF_SPECIAL+8,sp
add r16=32+SIZEOF_SPECIAL+16,sp
;;
}
{ .mmi
ld8 out0=[r15]
ld8 out1=[r16]
nop 0
}
{ .mfb
add out2=16,sp
nop 0
br.call.sptk rp=fork_exit
;;
}
// If we get back here, it means we're a user space process that's
// the immediate result of fork(2).
.global enter_userland
.type enter_userland, @function
enter_userland:
{ .mmi
add r14=24,sp
;;
ld8 r14=[r14]
nop 0
;;
}
{ .mbb
cmp.eq p6,p7=r0,r14
(p6) br.sptk exception_restore
(p7) br.sptk epc_syscall_return
;;
}
END(fork_trampoline)
#ifdef SMP
/*
* AP wake-up entry point. The handoff state is similar as for the BSP,
@ -198,7 +236,7 @@ ENTRY(os_boot_rendez,0)
;;
1: mov r16 = ip
add r17 = 2f-1b, r17
movl r18 = (IA64_PSR_AC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
movl r18 = (IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_DFH|IA64_PSR_DT|IA64_PSR_IC|IA64_PSR_IT|IA64_PSR_RT)
;;
add r17 = r17, r16
mov cr.ipsr = r18
@ -209,142 +247,54 @@ ENTRY(os_boot_rendez,0)
rfi
.align 32
2: movl r16 = ia64_vector_table // set up IVT early
2:
{ .mlx
mov ar.rsc = 0
movl r16 = ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva = r16
movl r16 = ap_stack
;;
}
{ .mmi
srlz.i
;;
movl r16 = ap_stack
movl r17 = ap_pcpu
mov ar.rsc = 0
movl gp = __gp
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
mov r18 = KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
add sp = r18, r16
}
{ .mlx
mov ar.bspstore = r16
mov ar.k4 = r17
mov r13 = r17 /* gas doesn't know tp as an alias for r13 */
movl gp = __gp
;;
}
{ .mmi
loadrs
movl r16 = ia64_pal_base
;;
alloc r17 = ar.pfs, 0, 0, 0, 0
add sp = r18, r16
;;
}
{ .mfb
mov ar.rsc = 3
ld8 r16 = [r16]
;;
cmp.eq p1, p0 = 0, r16
(p1) br.cond.spnt 1f
;;
mov r18 = 28<<2
movl r17 = 7<<61
;;
mov cr.itir = r18
or r17 = r17, r16
mov r16 = (PTE_P|PTE_MA_WB|PTE_A|PTE_D|PTE_PL_KERN|PTE_AR_RWX)
;;
mov cr.ifa = r17
extr.u r18 = r17, 12, 38
;;
srlz.i
shl r18 = r18, 12
;;
add r17 = 1, r0
or r16 = r16, r18
;;
itr.i itr[r17] = r16
;;
srlz.i
;;
1: alloc r16 = ar.pfs, 0, 0, 0, 0
;;
nop 0
br.call.sptk.few rp = ia64_ap_startup
;;
}
/* NOT REACHED */
9: br 9b
9:
{ .mfb
nop 0
nop 0
br.sptk 9b
;;
}
END(os_boot_rendez)
#endif /* !SMP */
/**************************************************************************/
/*
* Signal "trampoline" code. Invoked from RTE setup by sendsig().
*
* On entry, registers look like:
*
* r14 signal number
* r15 pointer to siginfo_t
* r16 pointer to signal context frame (scp)
* r17 address of handler function descriptor
* r18 address of new backing store (if any)
* sp+16 pointer to sigframe
*/
ENTRY(sigcode,0)
ld8 r8=[r17],8 // function address
;;
ld8 gp=[r17] // function's gp value
mov b6=r8 // transfer to a branch register
cover
;;
add r8=UC_MCONTEXT_MC_AR_BSP,r16 // address or mc_ar_bsp
mov r9=ar.bsp // save ar.bsp
;;
st8 [r8]=r9
cmp.eq p1,p2=r0,r18 // check for new bs
(p1) br.cond.sptk.few 1f // branch if not switching
flushrs // flush out to old bs
mov ar.rsc=0 // switch off RSE
add r8=UC_MCONTEXT_MC_AR_RNAT,r16 // address of mc_ar_rnat
;;
mov r9=ar.rnat // value of ar.rnat after flush
mov ar.bspstore=r18 // point at new bs
;;
st8 [r8]=r9 // remember ar.rnat
mov ar.rsc=15 // XXX bogus value - check
invala
;;
1: alloc r5=ar.pfs,0,0,3,0 // register frame for call
;;
mov out0=r14 // signal number
mov out1=r15 // siginfo
mov out2=r16 // ucontext
mov r4=r16 // save from call
br.call.sptk.few rp=b6 // call the signal handler
;;
alloc r14=ar.pfs,0,0,0,0 // discard call frame
;;
flushrs
;;
(p1) br.cond.sptk.few 2f // note: p1 is preserved
mov ar.rsc=0
add r8=UC_MCONTEXT_MC_AR_RNAT,r4 // address of mc_ar_rnat
;;
ld8 r9=[r8]
;;
add r8=UC_MCONTEXT_MC_AR_BSP,r4 // address of mc_ar_bsp
;;
ld8 r10=[r8]
;;
mov ar.bspstore=r10
;;
mov ar.rnat=r9
mov ar.rsc=15
;;
2: CALLSYS_NOERROR(sigreturn) // call sigreturn()
alloc r14=ar.pfs,0,0,1,0 ;;
mov out0=ret0 // if that failed, get error code
CALLSYS_NOERROR(exit) // and call exit() with it.
XENTRY(esigcode)
END(sigcode)
.data
EXPORT(szsigcode)
.quad esigcode-sigcode
.text
/*
* Create a default interrupt name table. The first entry (vector 0) is
* hardwaired to the clock interrupt.

View File

@ -25,32 +25,6 @@
*
* $FreeBSD$
*/
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
#include <machine/ia64_cpu.h>
@ -77,22 +51,33 @@ kstack: .space KSTACK_PAGES * PAGE_SIZE
* register r8.
*/
ENTRY(__start, 1)
{ .mlx
mov ar.rsc=0
movl r16=ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva=r16
movl r16=kstack
;;
}
{ .mmi
srlz.i
;;
ssm IA64_PSR_DFH
mov r17=KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
}
{ .mlx
add sp=r16,r17 // proc0's stack
movl gp=__gp // find kernel globals
;;
}
{ .mlx
mov ar.bspstore=r16 // switch backing store
movl r16=pa_bootinfo
;;
}
st8 [r16]=r8 // save the PA of the bootinfo block
loadrs // invalidate regs
;;
@ -117,7 +102,7 @@ ENTRY(__start, 1)
;;
ld8 out0=[out0]
;;
add r16=PCB_RP,out0 // return to mi_startup_trampoline
add r16=PCB_SPECIAL_RP,out0 // return to mi_startup_trampoline
movl r17=mi_startup_trampoline
;;
st8 [r16]=r17
@ -133,7 +118,7 @@ ENTRY(mi_startup_trampoline, 0)
.prologue
.save rp,r0
.body
br.call.sptk.many rp=mi_startup
// Should never happen
@ -141,6 +126,59 @@ ENTRY(mi_startup_trampoline, 0)
END(mi_startup_trampoline)
/*
* fork_trampoline()
*
* Arrange for a function to be invoked neatly, after a cpu_switch().
*
* Invokes fork_exit() passing in three arguments: a callout function, an
* argument to the callout, and a trapframe pointer. For child processes
* returning from fork(2), the argument is a pointer to the child process.
*
* The callout function and its argument is in the trapframe in scratch
* registers r2 and r3.
*/
ENTRY(fork_trampoline, 0)
.prologue
.save rp,r0
.body
{ .mmi
alloc r14=ar.pfs,0,0,3,0
add r15=32+SIZEOF_SPECIAL+8,sp
add r16=32+SIZEOF_SPECIAL+16,sp
;;
}
{ .mmi
ld8 out0=[r15]
ld8 out1=[r16]
nop 0
}
{ .mfb
add out2=16,sp
nop 0
br.call.sptk rp=fork_exit
;;
}
// If we get back here, it means we're a user space process that's
// the immediate result of fork(2).
.global enter_userland
.type enter_userland, @function
enter_userland:
{ .mmi
add r14=24,sp
;;
ld8 r14=[r14]
nop 0
;;
}
{ .mbb
cmp.eq p6,p7=r0,r14
(p6) br.sptk exception_restore
(p7) br.sptk epc_syscall_return
;;
}
END(fork_trampoline)
#ifdef SMP
/*
* AP wake-up entry point. The handoff state is similar as for the BSP,
@ -198,7 +236,7 @@ ENTRY(os_boot_rendez,0)
;;
1: mov r16 = ip
add r17 = 2f-1b, r17
movl r18 = (IA64_PSR_AC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
movl r18 = (IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_DFH|IA64_PSR_DT|IA64_PSR_IC|IA64_PSR_IT|IA64_PSR_RT)
;;
add r17 = r17, r16
mov cr.ipsr = r18
@ -209,142 +247,54 @@ ENTRY(os_boot_rendez,0)
rfi
.align 32
2: movl r16 = ia64_vector_table // set up IVT early
2:
{ .mlx
mov ar.rsc = 0
movl r16 = ia64_vector_table // set up IVT early
;;
}
{ .mlx
mov cr.iva = r16
movl r16 = ap_stack
;;
}
{ .mmi
srlz.i
;;
movl r16 = ap_stack
movl r17 = ap_pcpu
mov ar.rsc = 0
movl gp = __gp
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
mov r18 = KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
;;
add sp = r18, r16
}
{ .mlx
mov ar.bspstore = r16
mov ar.k4 = r17
mov r13 = r17 /* gas doesn't know tp as an alias for r13 */
movl gp = __gp
;;
}
{ .mmi
loadrs
movl r16 = ia64_pal_base
;;
alloc r17 = ar.pfs, 0, 0, 0, 0
add sp = r18, r16
;;
}
{ .mfb
mov ar.rsc = 3
ld8 r16 = [r16]
;;
cmp.eq p1, p0 = 0, r16
(p1) br.cond.spnt 1f
;;
mov r18 = 28<<2
movl r17 = 7<<61
;;
mov cr.itir = r18
or r17 = r17, r16
mov r16 = (PTE_P|PTE_MA_WB|PTE_A|PTE_D|PTE_PL_KERN|PTE_AR_RWX)
;;
mov cr.ifa = r17
extr.u r18 = r17, 12, 38
;;
srlz.i
shl r18 = r18, 12
;;
add r17 = 1, r0
or r16 = r16, r18
;;
itr.i itr[r17] = r16
;;
srlz.i
;;
1: alloc r16 = ar.pfs, 0, 0, 0, 0
;;
nop 0
br.call.sptk.few rp = ia64_ap_startup
;;
}
/* NOT REACHED */
9: br 9b
9:
{ .mfb
nop 0
nop 0
br.sptk 9b
;;
}
END(os_boot_rendez)
#endif /* !SMP */
/**************************************************************************/
/*
* Signal "trampoline" code. Invoked from RTE setup by sendsig().
*
* On entry, registers look like:
*
* r14 signal number
* r15 pointer to siginfo_t
* r16 pointer to signal context frame (scp)
* r17 address of handler function descriptor
* r18 address of new backing store (if any)
* sp+16 pointer to sigframe
*/
ENTRY(sigcode,0)
ld8 r8=[r17],8 // function address
;;
ld8 gp=[r17] // function's gp value
mov b6=r8 // transfer to a branch register
cover
;;
add r8=UC_MCONTEXT_MC_AR_BSP,r16 // address or mc_ar_bsp
mov r9=ar.bsp // save ar.bsp
;;
st8 [r8]=r9
cmp.eq p1,p2=r0,r18 // check for new bs
(p1) br.cond.sptk.few 1f // branch if not switching
flushrs // flush out to old bs
mov ar.rsc=0 // switch off RSE
add r8=UC_MCONTEXT_MC_AR_RNAT,r16 // address of mc_ar_rnat
;;
mov r9=ar.rnat // value of ar.rnat after flush
mov ar.bspstore=r18 // point at new bs
;;
st8 [r8]=r9 // remember ar.rnat
mov ar.rsc=15 // XXX bogus value - check
invala
;;
1: alloc r5=ar.pfs,0,0,3,0 // register frame for call
;;
mov out0=r14 // signal number
mov out1=r15 // siginfo
mov out2=r16 // ucontext
mov r4=r16 // save from call
br.call.sptk.few rp=b6 // call the signal handler
;;
alloc r14=ar.pfs,0,0,0,0 // discard call frame
;;
flushrs
;;
(p1) br.cond.sptk.few 2f // note: p1 is preserved
mov ar.rsc=0
add r8=UC_MCONTEXT_MC_AR_RNAT,r4 // address of mc_ar_rnat
;;
ld8 r9=[r8]
;;
add r8=UC_MCONTEXT_MC_AR_BSP,r4 // address of mc_ar_bsp
;;
ld8 r10=[r8]
;;
mov ar.bspstore=r10
;;
mov ar.rnat=r9
mov ar.rsc=15
;;
2: CALLSYS_NOERROR(sigreturn) // call sigreturn()
alloc r14=ar.pfs,0,0,1,0 ;;
mov out0=ret0 // if that failed, get error code
CALLSYS_NOERROR(exit) // and call exit() with it.
XENTRY(esigcode)
END(sigcode)
.data
EXPORT(szsigcode)
.quad esigcode-sigcode
.text
/*
* Create a default interrupt name table. The first entry (vector 0) is
* hardwaired to the clock interrupt.

File diff suppressed because it is too large Load Diff

View File

@ -56,6 +56,8 @@
#include <machine/fpu.h>
#include <i386/include/specialreg.h>
MALLOC_DECLARE(M_PMAP);
void ia64_ap_startup(void);
extern vm_offset_t vhpt_base, vhpt_size;
@ -69,36 +71,37 @@ extern u_int64_t ia64_lapic_address;
int mp_ipi_test = 0;
/* Variables used by os_boot_rendez */
volatile vm_offset_t ap_stack;
volatile struct pcpu *ap_pcpu;
void *ap_stack;
struct pcpu *ap_pcpu;
volatile int ap_delay;
volatile int ap_awake;
volatile int ap_spin;
static void ipi_send(u_int64_t, int);
static void cpu_mp_unleash(void *);
void
ia64_ap_startup(void)
{
ap_awake = 1;
ap_delay = 0;
__asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
"r" (vhpt_base + (1<<8) + (vhpt_size<<2) + 1));
pcpup = ap_pcpu;
ia64_set_k4((intptr_t)pcpup);
map_pal_code();
map_port_space();
map_gateway_page();
ia64_set_fpsr(IA64_FPSR_DEFAULT);
/*
* Set ia32 control registers.
*/
ia64_set_cflg(CR0_PE | CR0_PG | ((long)(CR4_XMM|CR4_FXSR) << 32));
ap_awake = 1;
ap_delay = 0;
/* Wait until it's time for us to be unleashed */
while (ap_spin)
/* spin */;
__asm __volatile("ssm psr.ic|psr.i;; srlz.i;;");
__asm __volatile("ssm psr.i;; srlz.d;;");
/*
* Get and save the CPU specific MCA records. Should we get the
@ -122,8 +125,8 @@ ia64_ap_startup(void)
ia64_set_itm(ia64_get_itc() + itm_reload);
ia64_set_itv(CLOCK_VECTOR);
ia64_set_tpr(0);
cpu_throw();
panic("ia64_ap_startup: cpu_throw() returned");
cpu_throw(NULL, choosethread());
/* NOTREACHED */
}
int
@ -213,22 +216,8 @@ cpu_mp_start()
pc->pc_current_pmap = kernel_pmap;
pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
if (pc->pc_cpuid > 0) {
void *ks;
/*
* Use contigmalloc for stack so that we can
* use a region 7 address for it which makes
* it impossible to accidentally lose when
* recording a trapframe.
*/
ks = contigmalloc(KSTACK_PAGES * PAGE_SIZE, M_TEMP,
M_WAITOK,
0ul,
256*1024*1024 - 1,
PAGE_SIZE,
256*1024*1024);
ap_stack = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)ks));
ap_stack = malloc(KSTACK_PAGES * PAGE_SIZE, M_PMAP,
M_WAITOK);
ap_pcpu = pc;
ap_delay = 2000;
ap_awake = 0;
@ -344,7 +333,7 @@ ipi_self(int ipi)
* cr.lid (CR64) contents of the target processor. Only the id and eid
* fields are used here.
*/
static void
void
ipi_send(u_int64_t lid, int ipi)
{
volatile u_int64_t *pipi;

View File

@ -121,9 +121,13 @@
#include <sys/user.h>
#include <machine/cpu.h>
#include <machine/pal.h>
#include <machine/md_var.h>
/* XXX move to a header. */
extern u_int64_t ia64_gateway_page[];
MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");
#ifndef KSTACK_MAX_PAGES
@ -201,7 +205,7 @@ vm_offset_t vhpt_base, vhpt_size;
* ia64_lptes. This gives us up to 2Gb of kernel virtual space.
*/
static int nkpt;
static struct ia64_lpte **kptdir;
struct ia64_lpte **ia64_kptdir;
#define KPTE_DIR_INDEX(va) \
((va >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
#define KPTE_PTE_INDEX(va) \
@ -369,12 +373,13 @@ pmap_bootstrap()
/*
* Allocate some memory for initial kernel 'page tables'.
*/
kptdir = (struct ia64_lpte **) pmap_steal_memory(PAGE_SIZE);
ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
for (i = 0; i < NKPT; i++) {
kptdir[i] = (struct ia64_lpte *) pmap_steal_memory(PAGE_SIZE);
ia64_kptdir[i] = (void*)pmap_steal_memory(PAGE_SIZE);
}
nkpt = NKPT;
kernel_vm_end = NKPT * PAGE_SIZE * NKPTEPG + VM_MIN_KERNEL_ADDRESS;
kernel_vm_end = NKPT * PAGE_SIZE * NKPTEPG + VM_MIN_KERNEL_ADDRESS -
VM_GATEWAY_SIZE;
avail_start = phys_avail[0];
for (i = 0; phys_avail[i+2]; i+= 2) ;
@ -497,6 +502,8 @@ pmap_bootstrap()
* Clear out any random TLB entries left over from booting.
*/
pmap_invalidate_all(kernel_pmap);
map_gateway_page();
}
void *
@ -754,27 +761,14 @@ pmap_track_modified(vm_offset_t va)
void
pmap_new_thread(struct thread *td, int pages)
{
vm_offset_t *ks;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Use contigmalloc for user area so that we can use a region
* 7 address for it which makes it impossible to accidentally
* lose when recording a trapframe.
*/
ks = contigmalloc(pages * PAGE_SIZE, M_PMAP, M_WAITOK, 0ul,
256*1024*1024 - 1, PAGE_SIZE, 256*1024*1024);
if (ks == NULL)
panic("pmap_new_thread: could not contigmalloc %d pages\n",
pages);
td->td_md.md_kstackvirt = ks;
td->td_kstack = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)ks));
td->td_kstack = (vm_offset_t)malloc(pages * PAGE_SIZE, M_PMAP,
M_WAITOK);
td->td_kstack_pages = pages;
}
@ -785,12 +779,10 @@ pmap_new_thread(struct thread *td, int pages)
void
pmap_dispose_thread(struct thread *td)
{
int pages;
pages = td->td_kstack_pages;
contigfree(td->td_md.md_kstackvirt, pages * PAGE_SIZE, M_PMAP);
td->td_md.md_kstackvirt = NULL;
free((void*)td->td_kstack, M_PMAP);
td->td_kstack = 0;
td->td_kstack_pages = 0;
}
/*
@ -800,16 +792,9 @@ void
pmap_new_altkstack(struct thread *td, int pages)
{
/*
* Shuffle the original stack. Save the virtual kstack address
* instead of the physical address because 1) we can derive the
* physical address from the virtual address and 2) we need the
* virtual address in pmap_dispose_thread.
*/
td->td_altkstack = td->td_kstack;
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack = (vm_offset_t)td->td_md.md_kstackvirt;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
}
@ -818,13 +803,7 @@ pmap_dispose_altkstack(struct thread *td)
{
pmap_dispose_thread(td);
/*
* Restore the original kstack. Note that td_altkstack holds the
* virtual kstack address of the previous kstack.
*/
td->td_md.md_kstackvirt = (void*)td->td_altkstack;
td->td_kstack = IA64_PHYS_TO_RR7(ia64_tpa(td->td_altkstack));
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
td->td_kstack_pages = td->td_altkstack_pages;
td->td_altkstack = 0;
@ -938,7 +917,7 @@ pmap_growkernel(vm_offset_t addr)
ptepage = (struct ia64_lpte *)
IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg));
bzero(ptepage, PAGE_SIZE);
kptdir[KPTE_DIR_INDEX(kernel_vm_end)] = ptepage;
ia64_kptdir[KPTE_DIR_INDEX(kernel_vm_end)] = ptepage;
nkpt++;
kernel_vm_end += PAGE_SIZE * NKPTEPG;
@ -1210,7 +1189,7 @@ pmap_find_kpte(vm_offset_t va)
("kernel mapping 0x%lx not in region 5", va));
KASSERT(IA64_RR_MASK(va) < (nkpt * PAGE_SIZE * NKPTEPG),
("kernel mapping 0x%lx out of range", va));
return &kptdir[KPTE_DIR_INDEX(va)][KPTE_PTE_INDEX(va)];
return (&ia64_kptdir[KPTE_DIR_INDEX(va)][KPTE_PTE_INDEX(va)]);
}
/*
@ -1355,6 +1334,7 @@ vm_paddr_t
pmap_kextract(vm_offset_t va)
{
struct ia64_lpte *pte;
vm_offset_t gwpage;
KASSERT(va >= IA64_RR_BASE(5), ("Must be kernel VA"));
@ -1362,6 +1342,11 @@ pmap_kextract(vm_offset_t va)
if (va >= IA64_RR_BASE(6))
return (IA64_RR_MASK(va));
/* EPC gateway page? */
gwpage = (vm_offset_t)ia64_get_k5();
if (va >= gwpage && va < gwpage + VM_GATEWAY_SIZE)
return (IA64_RR_MASK((vm_offset_t)ia64_gateway_page));
/* Bail out if the virtual address is beyond our limits. */
if (IA64_RR_MASK(va) >= nkpt * PAGE_SIZE * NKPTEPG)
return (0);

View File

@ -69,7 +69,6 @@
* ar.bsp tranlated to new mode
*/
ENTRY(ia64_change_mode, 0)
rsm psr.i | psr.ic
mov r19=ar.rsc // save rsc while we change mode
tbit.nz p6,p7=r14,17 // physical or virtual ?
@ -106,7 +105,6 @@ ENTRY(ia64_change_mode, 0)
2: mov ar.rsc=r19 // restore ar.rsc
br.ret.sptk.few rp // now in new mode
END(ia64_change_mode)
/*
@ -121,7 +119,6 @@ END(ia64_change_mode)
* psr.i cleared
*/
ENTRY(ia64_physical_mode, 0)
mov r14=psr
mov ret0=psr
movl r15=(IA64_PSR_I|IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFL|IA64_PSR_DFH)
@ -133,7 +130,6 @@ ENTRY(ia64_physical_mode, 0)
or ret0=ret0,r16 // make sure BN=1
br.cond.sptk.many ia64_change_mode
END(ia64_physical_mode)
/*
@ -148,7 +144,6 @@ END(ia64_physical_mode)
*
*/
ENTRY(ia64_call_efi_physical, 6)
.prologue
.regstk 6,4,5,0
.save ar.pfs,loc0
@ -183,7 +178,6 @@ ENTRY(ia64_call_efi_physical, 6)
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_call_efi_physical)
/**************************************************************************/
@ -194,8 +188,7 @@ END(ia64_call_efi_physical)
ENTRY(suword64, 2)
XENTRY(suword)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -218,12 +211,10 @@ XENTRY(suword)
mov ret0=r0
br.ret.sptk.few rp
END(suword64)
ENTRY(suword32, 2)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -246,12 +237,10 @@ ENTRY(suword32, 2)
mov ret0=r0
br.ret.sptk.few rp
END(suword32)
ENTRY(subyte, 2)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -274,13 +263,11 @@ ENTRY(subyte, 2)
mov ret0=r0
br.ret.sptk.few rp
END(subyte)
ENTRY(fuword64, 1)
XENTRY(fuword)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -302,12 +289,10 @@ XENTRY(fuword)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fuword64)
ENTRY(fuword32, 1)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -329,12 +314,10 @@ ENTRY(fuword32, 1)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fuword32)
ENTRY(fubyte, 1)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -356,7 +339,6 @@ ENTRY(fubyte, 1)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fubyte)
ENTRY(fusufault, 0)
@ -410,7 +392,6 @@ ENTRY(copystr, 4)
4: mov ret0=0 // return 0.
br.ret.sptk.few rp
END(copystr)
ENTRY(copyinstr, 4)
@ -422,7 +403,7 @@ ENTRY(copyinstr, 4)
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -450,54 +431,12 @@ ENTRY(copyinstr, 4)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from copystr
END(copyinstr)
ENTRY(copyoutstr, 4)
.prologue
.regstk 4, 3, 4, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;
(p6) br.cond.spnt.few copyerr // if it's not, error out.
movl r14=copyerr // set up fault handler.
add r15=PC_CURTHREAD,r13 // find curthread
;;
ld8 r15=[r15]
;;
add r15=TD_PCB,r15 // find pcb
;;
ld8 r15=[r15]
;;
add loc2=PCB_ONFAULT,r15
;;
st8 [loc2]=r14
;;
mov out0=in0
mov out1=in1
mov out2=in2
mov out3=in3
;;
br.call.sptk.few rp=copystr // do the copy.
st8 [loc2]=r0 // kill the fault handler.
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from copystr
END(copyoutstr)
/*
* Not the fastest bcopy in the world.
*/
ENTRY(bcopy, 3)
mov ret0=r0 // return zero for copy{in,out}
;;
cmp.le p6,p0=in2,r0 // bail if len <= 0
@ -559,29 +498,25 @@ ENTRY(bcopy, 3)
(p6) br.cond.spnt.few 6b
br.ret.sptk.few rp
END(bcopy)
ENTRY(memcpy,3)
mov r14=in0 ;;
mov in0=in1 ;;
mov in1=r14
br.cond.sptk.few bcopy
END(memcpy)
ENTRY(copyin, 3)
.prologue
.regstk 4, 3, 4, 0
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -608,20 +543,18 @@ ENTRY(copyin, 3)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyin)
ENTRY(copyout, 3)
.prologue
.regstk 4, 3, 4, 0
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that dest addr
movl loc2=VM_MAX_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;
@ -648,11 +581,9 @@ ENTRY(copyout, 3)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyout)
ENTRY(copyerr, 0)
add r14=PC_CURTHREAD,r13 ;; // find curthread
ld8 r14=[r14] ;;
add r14=TD_PCB,r14 ;; // curthread->td_addr
@ -662,5 +593,4 @@ ENTRY(copyerr, 0)
mov ret0=EFAULT // return EFAULT
br.ret.sptk.few rp
END(copyerr)

View File

@ -69,7 +69,6 @@
* ar.bsp tranlated to new mode
*/
ENTRY(ia64_change_mode, 0)
rsm psr.i | psr.ic
mov r19=ar.rsc // save rsc while we change mode
tbit.nz p6,p7=r14,17 // physical or virtual ?
@ -106,7 +105,6 @@ ENTRY(ia64_change_mode, 0)
2: mov ar.rsc=r19 // restore ar.rsc
br.ret.sptk.few rp // now in new mode
END(ia64_change_mode)
/*
@ -121,7 +119,6 @@ END(ia64_change_mode)
* psr.i cleared
*/
ENTRY(ia64_physical_mode, 0)
mov r14=psr
mov ret0=psr
movl r15=(IA64_PSR_I|IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFL|IA64_PSR_DFH)
@ -133,7 +130,6 @@ ENTRY(ia64_physical_mode, 0)
or ret0=ret0,r16 // make sure BN=1
br.cond.sptk.many ia64_change_mode
END(ia64_physical_mode)
/*
@ -148,7 +144,6 @@ END(ia64_physical_mode)
*
*/
ENTRY(ia64_call_efi_physical, 6)
.prologue
.regstk 6,4,5,0
.save ar.pfs,loc0
@ -183,7 +178,6 @@ ENTRY(ia64_call_efi_physical, 6)
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_call_efi_physical)
/**************************************************************************/
@ -194,8 +188,7 @@ END(ia64_call_efi_physical)
ENTRY(suword64, 2)
XENTRY(suword)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -218,12 +211,10 @@ XENTRY(suword)
mov ret0=r0
br.ret.sptk.few rp
END(suword64)
ENTRY(suword32, 2)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -246,12 +237,10 @@ ENTRY(suword32, 2)
mov ret0=r0
br.ret.sptk.few rp
END(suword32)
ENTRY(subyte, 2)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -274,13 +263,11 @@ ENTRY(subyte, 2)
mov ret0=r0
br.ret.sptk.few rp
END(subyte)
ENTRY(fuword64, 1)
XENTRY(fuword)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -302,12 +289,10 @@ XENTRY(fuword)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fuword64)
ENTRY(fuword32, 1)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -329,12 +314,10 @@ ENTRY(fuword32, 1)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fuword32)
ENTRY(fubyte, 1)
movl r14=VM_MAXUSER_ADDRESS;; // make sure address is ok
movl r14=VM_MAX_ADDRESS;; // make sure address is ok
cmp.geu p6,p0=in0,r14
(p6) br.dpnt.few fusufault
@ -356,7 +339,6 @@ ENTRY(fubyte, 1)
st8 [r15]=r0 // clean up
br.ret.sptk.few rp
END(fubyte)
ENTRY(fusufault, 0)
@ -410,7 +392,6 @@ ENTRY(copystr, 4)
4: mov ret0=0 // return 0.
br.ret.sptk.few rp
END(copystr)
ENTRY(copyinstr, 4)
@ -422,7 +403,7 @@ ENTRY(copyinstr, 4)
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -450,54 +431,12 @@ ENTRY(copyinstr, 4)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from copystr
END(copyinstr)
ENTRY(copyoutstr, 4)
.prologue
.regstk 4, 3, 4, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;
(p6) br.cond.spnt.few copyerr // if it's not, error out.
movl r14=copyerr // set up fault handler.
add r15=PC_CURTHREAD,r13 // find curthread
;;
ld8 r15=[r15]
;;
add r15=TD_PCB,r15 // find pcb
;;
ld8 r15=[r15]
;;
add loc2=PCB_ONFAULT,r15
;;
st8 [loc2]=r14
;;
mov out0=in0
mov out1=in1
mov out2=in2
mov out3=in3
;;
br.call.sptk.few rp=copystr // do the copy.
st8 [loc2]=r0 // kill the fault handler.
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from copystr
END(copyoutstr)
/*
* Not the fastest bcopy in the world.
*/
ENTRY(bcopy, 3)
mov ret0=r0 // return zero for copy{in,out}
;;
cmp.le p6,p0=in2,r0 // bail if len <= 0
@ -559,29 +498,25 @@ ENTRY(bcopy, 3)
(p6) br.cond.spnt.few 6b
br.ret.sptk.few rp
END(bcopy)
ENTRY(memcpy,3)
mov r14=in0 ;;
mov in0=in1 ;;
mov in1=r14
br.cond.sptk.few bcopy
END(memcpy)
ENTRY(copyin, 3)
.prologue
.regstk 4, 3, 4, 0
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
movl loc2=VM_MAX_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -608,20 +543,18 @@ ENTRY(copyin, 3)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyin)
ENTRY(copyout, 3)
.prologue
.regstk 4, 3, 4, 0
.regstk 3, 3, 3, 0
.save ar.pfs,loc0
alloc loc0=ar.pfs,4,3,4,0
alloc loc0=ar.pfs,3,3,3,0
.save rp,loc1
mov loc1=rp
.body
movl loc2=VM_MAXUSER_ADDRESS // make sure that dest addr
movl loc2=VM_MAX_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;
@ -648,11 +581,9 @@ ENTRY(copyout, 3)
mov ar.pfs=loc0 // restore ar.pfs
mov rp=loc1 // restore ra.
br.ret.sptk.few rp // ret0 left over from bcopy
END(copyout)
ENTRY(copyerr, 0)
add r14=PC_CURTHREAD,r13 ;; // find curthread
ld8 r14=[r14] ;;
add r14=TD_PCB,r14 ;; // curthread->td_addr
@ -662,5 +593,4 @@ ENTRY(copyerr, 0)
mov ret0=EFAULT // return EFAULT
br.ret.sptk.few rp
END(copyerr)

View File

@ -46,6 +46,7 @@
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/pioctl.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
@ -61,6 +62,9 @@
#include <machine/pal.h>
#include <machine/fpu.h>
#include <machine/efi.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
@ -71,7 +75,13 @@
#include <ddb/ddb.h>
#endif
static int print_usertrap = 0;
SYSCTL_INT(_machdep, CPU_UNALIGNED_PRINT, print_usertrap,
CTLFLAG_RW, &print_usertrap, 0, "");
extern int unaligned_fixup(struct trapframe *framep, struct thread *td);
static void break_syscall(struct trapframe *tf);
static void ia32_syscall(struct trapframe *framep);
/*
@ -93,12 +103,12 @@ typedef struct {
* these there. The rest of the registers are "live"
*/
typedef struct {
u_int64_t bitmask_low64; /* f63 - f2 */
u_int64_t bitmask_high64; /* f127 - f64 */
struct ia64_fpreg *fp_low_preserved; /* f2 - f5 */
struct ia64_fpreg *fp_low_volatile; /* f6 - f15 */
struct ia64_fpreg *fp_high_preserved; /* f16 - f31 */
struct ia64_fpreg *fp_high_volatile; /* f32 - f127 */
u_int64_t bitmask_low64; /* f63 - f2 */
u_int64_t bitmask_high64; /* f127 - f64 */
struct _ia64_fpreg *fp_low_preserved; /* f2 - f5 */
struct _ia64_fpreg *fp_low_volatile; /* f6 - f15 */
struct _ia64_fpreg *fp_high_preserved; /* f16 - f31 */
struct _ia64_fpreg *fp_high_volatile; /* f32 - f127 */
} FP_STATE;
#ifdef WITNESS
@ -271,7 +281,7 @@ static void printisr(u_int64_t isr)
}
static void
printtrap(int vector, int imm, struct trapframe *framep, int isfatal, int user)
printtrap(int vector, struct trapframe *framep, int isfatal, int user)
{
printf("\n");
printf("%s %s trap (cpu %d):\n", isfatal? "fatal" : "handled",
@ -279,16 +289,15 @@ printtrap(int vector, int imm, struct trapframe *framep, int isfatal, int user)
printf("\n");
printf(" trap vector = 0x%x (%s)\n",
vector, ia64_vector_names[vector]);
printf(" cr.iip = 0x%lx\n", framep->tf_cr_iip);
printf(" cr.ipsr = 0x%lx (", framep->tf_cr_ipsr);
printpsr(framep->tf_cr_ipsr);
printf(" cr.iip = 0x%lx\n", framep->tf_special.iip);
printf(" cr.ipsr = 0x%lx (", framep->tf_special.psr);
printpsr(framep->tf_special.psr);
printf(")\n");
printf(" cr.isr = 0x%lx (", framep->tf_cr_isr);
printisr(framep->tf_cr_isr);
printf(" cr.isr = 0x%lx (", framep->tf_special.isr);
printisr(framep->tf_special.isr);
printf(")\n");
printf(" cr.ifa = 0x%lx\n", framep->tf_cr_ifa);
printf(" cr.iim = 0x%x\n", imm);
if (framep->tf_cr_ipsr & IA64_PSR_IS) {
printf(" cr.ifa = 0x%lx\n", framep->tf_special.ifa);
if (framep->tf_special.psr & IA64_PSR_IS) {
printf(" ar.cflg = 0x%lx\n", ia64_get_cflg());
printf(" ar.csd = 0x%lx\n", ia64_get_csd());
printf(" ar.ssd = 0x%lx\n", ia64_get_ssd());
@ -300,35 +309,58 @@ printtrap(int vector, int imm, struct trapframe *framep, int isfatal, int user)
printf("\n");
}
/*
*
*/
int
do_ast(struct trapframe *tf)
{
disable_intr();
while (curthread->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
enable_intr();
ast(tf);
disable_intr();
}
/*
* Keep interrupts disabled. We return r10 as a favor to the EPC
* syscall code so that it can quicky determine if the syscall
* needs to be restarted or not.
*/
return (tf->tf_scratch.gr10);
}
/*
* Trap is called from exception.s to handle most types of processor traps.
* System calls are broken out for efficiency and ASTs are broken out
* to make the code a bit cleaner and more representative of the
* architecture.
*/
/*ARGSUSED*/
void
trap(int vector, int imm, struct trapframe *framep)
trap(int vector, struct trapframe *framep)
{
struct thread *td;
struct proc *p;
int i;
struct thread *td;
u_int64_t ucode;
int i, user;
u_int sticks;
int user;
cnt.v_trap++;
user = ((framep->tf_special.psr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
/* Short-circuit break instruction based system calls. */
if (vector == IA64_VEC_BREAK && user &&
framep->tf_special.ifa == 0x100000) {
break_syscall(framep);
return;
}
/* Sanitize the FP state in case the user has trashed it. */
ia64_set_fpsr(IA64_FPSR_DEFAULT);
atomic_add_int(&cnt.v_trap, 1);
td = curthread;
p = td->td_proc;
ucode = 0;
/*
* Make sure we have a sane floating-point state in case the
* user has trashed it.
*/
ia64_set_fpsr(IA64_FPSR_DEFAULT);
user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
if (user) {
sticks = td->td_sticks;
td->td_frame = framep;
@ -341,19 +373,18 @@ trap(int vector, int imm, struct trapframe *framep)
}
switch (vector) {
case IA64_VEC_UNALIGNED_REFERENCE:
case IA64_VEC_UNALIGNED_REFERENCE: {
/*
* If user-land, do whatever fixups, printing, and
* signalling is appropriate (based on system-wide
* and per-process unaligned-access-handling flags).
*/
if (user) {
mtx_lock(&Giant);
i = unaligned_fixup(framep, td);
mtx_unlock(&Giant);
if (i == 0)
goto out;
ucode = framep->tf_cr_ifa; /* VA */
ucode = framep->tf_special.ifa; /* VA */
break;
}
@ -369,9 +400,9 @@ trap(int vector, int imm, struct trapframe *framep)
* does cause an unaligned access it's a kernel bug.
*/
goto dopanic;
}
case IA64_VEC_FLOATING_POINT_FAULT:
{
case IA64_VEC_FLOATING_POINT_FAULT: {
FP_STATE fp_state;
FPSWA_RET fpswa_ret;
FPSWA_BUNDLE bundle;
@ -385,7 +416,7 @@ trap(int vector, int imm, struct trapframe *framep)
break;
}
mtx_lock(&Giant);
i = copyin((const void *)(framep->tf_cr_iip), &bundle, 16);
i = copyin((void *)(framep->tf_special.iip), &bundle, 16);
mtx_unlock(&Giant);
if (i) {
i = SIGBUS; /* EFAULT, basically */
@ -396,28 +427,28 @@ trap(int vector, int imm, struct trapframe *framep)
fp_state.bitmask_low64 = 0xffc0; /* bits 6 - 15 */
fp_state.bitmask_high64 = 0x0;
fp_state.fp_low_preserved = NULL;
fp_state.fp_low_volatile = framep->tf_f;
fp_state.fp_low_volatile = &framep->tf_scratch_fp.fr6;
fp_state.fp_high_preserved = NULL;
fp_state.fp_high_volatile = NULL;
/* The docs are unclear. Is Fpswa reentrant? */
fpswa_ret = fpswa_interface->Fpswa(1, &bundle,
&framep->tf_cr_ipsr, &framep->tf_ar_fpsr,
&framep->tf_cr_isr, &framep->tf_pr,
&framep->tf_cr_ifs, &fp_state);
&framep->tf_special.psr, &framep->tf_special.fpsr,
&framep->tf_special.isr, &framep->tf_special.pr,
&framep->tf_special.cfm, &fp_state);
if (fpswa_ret.status == 0) {
/* fixed. update ipsr and iip to next insn */
int ei;
ei = (framep->tf_cr_isr >> 41) & 0x03;
ei = (framep->tf_special.isr >> 41) & 0x03;
if (ei == 0) { /* no template for this case */
framep->tf_cr_ipsr &= ~IA64_ISR_EI;
framep->tf_cr_ipsr |= IA64_ISR_EI_1;
framep->tf_special.psr &= ~IA64_ISR_EI;
framep->tf_special.psr |= IA64_ISR_EI_1;
} else if (ei == 1) { /* MFI or MFB */
framep->tf_cr_ipsr &= ~IA64_ISR_EI;
framep->tf_cr_ipsr |= IA64_ISR_EI_2;
framep->tf_special.psr &= ~IA64_ISR_EI;
framep->tf_special.psr |= IA64_ISR_EI_2;
} else if (ei == 2) { /* MMF */
framep->tf_cr_ipsr &= ~IA64_ISR_EI;
framep->tf_cr_iip += 0x10;
framep->tf_special.psr &= ~IA64_ISR_EI;
framep->tf_special.iip += 0x10;
}
goto out;
} else if (fpswa_ret.status == -1) {
@ -446,8 +477,7 @@ trap(int vector, int imm, struct trapframe *framep)
}
}
case IA64_VEC_FLOATING_POINT_TRAP:
{
case IA64_VEC_FLOATING_POINT_TRAP: {
FP_STATE fp_state;
FPSWA_RET fpswa_ret;
FPSWA_BUNDLE bundle;
@ -461,7 +491,7 @@ trap(int vector, int imm, struct trapframe *framep)
break;
}
mtx_lock(&Giant);
i = copyin((const void *)(framep->tf_cr_iip), &bundle, 16);
i = copyin((void *)(framep->tf_special.iip), &bundle, 16);
mtx_unlock(&Giant);
if (i) {
i = SIGBUS; /* EFAULT, basically */
@ -472,14 +502,14 @@ trap(int vector, int imm, struct trapframe *framep)
fp_state.bitmask_low64 = 0xffc0; /* bits 6 - 15 */
fp_state.bitmask_high64 = 0x0;
fp_state.fp_low_preserved = NULL;
fp_state.fp_low_volatile = framep->tf_f;
fp_state.fp_low_volatile = &framep->tf_scratch_fp.fr6;
fp_state.fp_high_preserved = NULL;
fp_state.fp_high_volatile = NULL;
/* The docs are unclear. Is Fpswa reentrant? */
fpswa_ret = fpswa_interface->Fpswa(0, &bundle,
&framep->tf_cr_ipsr, &framep->tf_ar_fpsr,
&framep->tf_cr_isr, &framep->tf_pr,
&framep->tf_cr_ifs, &fp_state);
&framep->tf_special.psr, &framep->tf_special.fpsr,
&framep->tf_special.isr, &framep->tf_special.pr,
&framep->tf_special.cfm, &fp_state);
if (fpswa_ret.status == 0) {
/* fixed */
/*
@ -501,24 +531,97 @@ trap(int vector, int imm, struct trapframe *framep)
}
}
case IA64_VEC_DISABLED_FP:
/*
* on exit from the kernel, if thread == fpcurthread,
* FP is enabled.
*/
if (PCPU_GET(fpcurthread) == td) {
printf("trap: fp disabled for fpcurthread == %p", td);
case IA64_VEC_DISABLED_FP: { /* High FP registers are disabled. */
struct pcpu *pcpu;
struct pcb *pcb;
struct thread *thr;
/* Always fatal in kernel. Should never happen. */
if (!user)
goto dopanic;
pcb = td->td_pcb;
pcpu = pcb->pcb_fpcpu;
#if 0
printf("XXX: td %p: highfp on cpu %p\n", td, pcpu);
#endif
/*
* The pcpu variable holds the address of the per-CPU
* structure of the CPU currently holding this threads
* high FP registers (or NULL if no CPU holds these
* registers). We have to interrupt that CPU and wait
* for it to have saved the registers.
*/
if (pcpu != NULL) {
thr = pcpu->pc_fpcurthread;
KASSERT(thr == td, ("High FP state out of sync"));
if (pcpu == pcpup) {
/*
* Short-circuit handling the trap when this
* CPU already holds the high FP registers for
* this thread. We really shouldn't get the
* trap in the first place, but since it's
* only a performance issue and not a
* correctness issue, we emit a message for
* now, enable the high FP registers and
* return.
*/
printf("XXX: bogusly disabled high FP regs\n");
framep->tf_special.psr &= ~IA64_PSR_DFH;
goto out;
}
#ifdef SMP
/*
* Interrupt the other CPU so that it saves the high
* FP registers of this thread. Note that this can
* only happen for the SMP case.
*/
ipi_send(pcpu->pc_lid, IPI_HIGH_FP);
#endif
#ifdef DIAGNOSTICS
} else {
KASSERT(PCPU_GET(fpcurthread) != td,
("High FP state out of sync"));
#endif
}
ia64_fpstate_switch(td);
thr = PCPU_GET(fpcurthread);
#if 0
printf("XXX: cpu %p: highfp belongs to td %p\n", pcpup, thr);
#endif
/*
* The thr variable holds the thread that owns the high FP
* registers currently on this CPU. Free this CPU so that
* we can load the current threads high FP registers.
*/
if (thr != NULL) {
KASSERT(thr != td, ("High FP state out of sync"));
pcb = thr->td_pcb;
KASSERT(pcb->pcb_fpcpu == pcpup,
("High FP state out of sync"));
ia64_highfp_save(thr);
}
/*
* Wait for the other CPU to have saved out high FP
* registers (if applicable).
*/
while (pcpu && pcpu->pc_fpcurthread == td);
ia64_highfp_load(td);
framep->tf_special.psr &= ~IA64_PSR_DFH;
goto out;
break;
}
case IA64_VEC_PAGE_NOT_PRESENT:
case IA64_VEC_INST_ACCESS_RIGHTS:
case IA64_VEC_DATA_ACCESS_RIGHTS:
{
case IA64_VEC_DATA_ACCESS_RIGHTS: {
vm_offset_t va;
struct vmspace *vm;
vm_map_t map;
@ -526,7 +629,7 @@ trap(int vector, int imm, struct trapframe *framep)
int rv;
rv = 0;
va = framep->tf_cr_ifa;
va = framep->tf_special.ifa;
/*
* If it was caused by fuswintr or suswintr, just punt. Note
@ -536,8 +639,8 @@ trap(int vector, int imm, struct trapframe *framep)
*/
if (!user && td != NULL && td->td_pcb->pcb_accessaddr == va &&
td->td_pcb->pcb_onfault == (unsigned long)fswintrberr) {
framep->tf_cr_iip = td->td_pcb->pcb_onfault;
framep->tf_cr_ipsr &= ~IA64_PSR_RI;
framep->tf_special.iip = td->td_pcb->pcb_onfault;
framep->tf_special.psr &= ~IA64_PSR_RI;
td->td_pcb->pcb_onfault = 0;
goto out;
}
@ -559,9 +662,9 @@ trap(int vector, int imm, struct trapframe *framep)
map = &vm->vm_map;
}
if (framep->tf_cr_isr & IA64_ISR_X)
if (framep->tf_special.isr & IA64_ISR_X)
ftype = VM_PROT_EXECUTE;
else if (framep->tf_cr_isr & IA64_ISR_W)
else if (framep->tf_special.isr & IA64_ISR_W)
ftype = VM_PROT_WRITE;
else
ftype = VM_PROT_READ;
@ -597,8 +700,9 @@ trap(int vector, int imm, struct trapframe *framep)
if (!user) {
/* Check for copyin/copyout fault. */
if (td != NULL && td->td_pcb->pcb_onfault != 0) {
framep->tf_cr_iip = td->td_pcb->pcb_onfault;
framep->tf_cr_ipsr &= ~IA64_PSR_RI;
framep->tf_special.iip =
td->td_pcb->pcb_onfault;
framep->tf_special.psr &= ~IA64_PSR_RI;
td->td_pcb->pcb_onfault = 0;
goto out;
}
@ -609,10 +713,10 @@ trap(int vector, int imm, struct trapframe *framep)
break;
}
case IA64_VEC_SINGLE_STEP_TRAP:
case IA64_VEC_DEBUG:
case IA64_VEC_TAKEN_BRANCH_TRAP:
case IA64_VEC_BREAK:
case IA64_VEC_DEBUG:
case IA64_VEC_SINGLE_STEP_TRAP:
case IA64_VEC_TAKEN_BRANCH_TRAP: {
/*
* These are always fatal in kernel, and should never happen.
*/
@ -633,27 +737,29 @@ trap(int vector, int imm, struct trapframe *framep)
}
i = SIGTRAP;
break;
}
case IA64_VEC_GENERAL_EXCEPTION:
case IA64_VEC_GENERAL_EXCEPTION: {
if (user) {
ucode = vector;
i = SIGILL;
break;
}
goto dopanic;
}
case IA64_VEC_UNSUPP_DATA_REFERENCE:
case IA64_VEC_LOWER_PRIVILEGE_TRANSFER:
case IA64_VEC_LOWER_PRIVILEGE_TRANSFER: {
if (user) {
ucode = vector;
i = SIGBUS;
break;
}
goto dopanic;
}
case IA64_VEC_IA32_EXCEPTION:
{
u_int64_t isr = framep->tf_cr_isr;
case IA64_VEC_IA32_EXCEPTION: {
u_int64_t isr = framep->tf_special.isr;
switch ((isr >> 16) & 0xffff) {
case IA32_EXCEPTION_DIVIDE:
@ -694,7 +800,7 @@ trap(int vector, int imm, struct trapframe *framep)
break;
case IA32_EXCEPTION_ALIGNMENT_CHECK:
ucode = framep->tf_cr_ifa; /* VA */
ucode = framep->tf_special.ifa; /* VA */
i = SIGBUS;
break;
@ -709,33 +815,36 @@ trap(int vector, int imm, struct trapframe *framep)
break;
}
case IA64_VEC_IA32_INTERRUPT:
case IA64_VEC_IA32_INTERRUPT: {
/*
* INT n instruction - probably a syscall.
*/
if (((framep->tf_cr_isr >> 16) & 0xffff) == 0x80) {
if (((framep->tf_special.isr >> 16) & 0xffff) == 0x80) {
ia32_syscall(framep);
goto out;
} else {
ucode = (framep->tf_cr_isr >> 16) & 0xffff;
ucode = (framep->tf_special.isr >> 16) & 0xffff;
i = SIGILL;
break;
}
}
case IA64_VEC_IA32_INTERCEPT:
case IA64_VEC_IA32_INTERCEPT: {
/*
* Maybe need to emulate ia32 instruction.
*/
goto dopanic;
}
default:
goto dopanic;
}
#ifdef DEBUG
printtrap(vector, imm, framep, 1, user);
#endif
if (print_usertrap)
printtrap(vector, framep, 1, user);
trapsignal(td, i, ucode);
out:
if (user) {
userret(td, framep, sticks);
@ -743,73 +852,96 @@ trap(int vector, int imm, struct trapframe *framep)
#ifdef DIAGNOSTIC
cred_free_thread(td);
#endif
do_ast(framep);
}
return;
dopanic:
printtrap(vector, imm, framep, 1, user);
/* XXX dump registers */
printtrap(vector, framep, 1, user);
#ifdef DDB
kdb_trap(vector, framep);
#endif
panic("trap");
}
/*
* Handle break instruction based system calls.
*/
void
break_syscall(struct trapframe *tf)
{
uint64_t *bsp, *tfp;
uint64_t iip, psr;
int error, nargs;
/* Save address of break instruction. */
iip = tf->tf_special.iip;
psr = tf->tf_special.psr;
/* Advance to the next instruction. */
tf->tf_special.psr += IA64_PSR_RI_1;
if ((tf->tf_special.psr & IA64_PSR_RI) > IA64_PSR_RI_2) {
tf->tf_special.iip += 16;
tf->tf_special.psr &= ~IA64_PSR_RI;
}
/*
* Copy the arguments on the register stack into the trapframe
* to avoid having interleaved NaT collections.
*/
tfp = &tf->tf_scratch.gr16;
nargs = tf->tf_special.cfm & 0x7f;
bsp = (uint64_t*)(curthread->td_kstack + tf->tf_special.ndirty);
bsp -= (((uintptr_t)bsp & 0x1ff) < (nargs << 3)) ? (nargs + 1): nargs;
while (nargs--) {
*tfp++ = *bsp++;
if (((uintptr_t)bsp & 0x1ff) == 0x1f8)
bsp++;
}
error = syscall(tf);
if (error == ERESTART) {
tf->tf_special.iip = iip;
tf->tf_special.psr = psr;
}
do_ast(tf);
}
/*
* Process a system call.
*
* System calls are strange beasts. They are passed the syscall number
* in r15, and the arguments in the registers (as normal). They return
* an error flag in r10 (if r10 != 0 on return, the syscall had an error),
* and the return value (if any) in r8 and r9.
*
* The assembly stub takes care of moving the call number into a register
* we can get to, and moves all of the argument registers into a stack
* buffer. On return, it restores r8-r10 from the frame before
* returning to the user process.
* See syscall.s for details as to how we get here. In order to support
* the ERESTART case, we return the error to our caller. They deal with
* the hairy details.
*/
void
syscall(int code, u_int64_t *args, struct trapframe *framep)
int
syscall(struct trapframe *tf)
{
struct sysent *callp;
struct thread *td;
struct proc *p;
int error = 0;
u_int64_t oldip, oldri;
struct thread *td;
u_int64_t *args;
int code, error;
u_int sticks;
cnt.v_syscall++;
code = tf->tf_scratch.gr15;
args = &tf->tf_scratch.gr16;
atomic_add_int(&cnt.v_syscall, 1);
td = curthread;
p = td->td_proc;
td->td_frame = framep;
td->td_frame = tf;
sticks = td->td_sticks;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
/*
* Skip past the break instruction. Remember old address in case
* we have to restart.
*/
oldip = framep->tf_cr_iip;
oldri = framep->tf_cr_ipsr & IA64_PSR_RI;
framep->tf_cr_ipsr += IA64_PSR_RI_1;
if ((framep->tf_cr_ipsr & IA64_PSR_RI) > IA64_PSR_RI_2) {
framep->tf_cr_ipsr &= ~IA64_PSR_RI;
framep->tf_cr_iip += 16;
}
if (p->p_flag & P_THREADED)
thread_user_enter(p, td);
#ifdef DIAGNOSTIC
ia64_fpstate_check(td);
#endif
if (p->p_sysent->sv_prepsyscall) {
/* (*p->p_sysent->sv_prepsyscall)(framep, args, &code, &params); */
/* (*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params); */
panic("prepsyscall");
} else {
/*
@ -842,39 +974,33 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
if (KTRPOINT(td, KTR_SYSCALL))
ktrsyscall(code, (callp->sy_narg & SYF_ARGMASK), args);
#endif
if (error == 0) {
td->td_retval[0] = 0;
td->td_retval[1] = 0;
STOPEVENT(p, S_SCE, (callp->sy_narg & SYF_ARGMASK));
td->td_retval[0] = 0;
td->td_retval[1] = 0;
tf->tf_scratch.gr10 = EJUSTRETURN;
error = (*callp->sy_call)(td, args);
}
STOPEVENT(p, S_SCE, (callp->sy_narg & SYF_ARGMASK));
error = (*callp->sy_call)(td, args);
switch (error) {
case 0:
framep->tf_r[FRAME_R8] = td->td_retval[0];
framep->tf_r[FRAME_R9] = td->td_retval[1];
framep->tf_r[FRAME_R10] = 0;
break;
case ERESTART:
framep->tf_cr_iip = oldip;
framep->tf_cr_ipsr =
(framep->tf_cr_ipsr & ~IA64_PSR_RI) | oldri;
break;
case EJUSTRETURN:
break;
default:
if (p->p_sysent->sv_errsize) {
if (error >= p->p_sysent->sv_errsize)
error = -1; /* XXX */
else
if (error != EJUSTRETURN) {
/*
* Save the "raw" error code in r10. We use this to handle
* syscall restarts (see do_ast()).
*/
tf->tf_scratch.gr10 = error;
if (error == 0) {
tf->tf_scratch.gr8 = td->td_retval[0];
tf->tf_scratch.gr9 = td->td_retval[1];
} else if (error != ERESTART) {
if (error < p->p_sysent->sv_errsize)
error = p->p_sysent->sv_errtbl[error];
/*
* Translated error codes are returned in r8. User
* processes use the translated error code.
*/
tf->tf_scratch.gr8 = error;
}
framep->tf_r[FRAME_R8] = error;
framep->tf_r[FRAME_R10] = 1;
break;
}
/*
@ -883,12 +1009,13 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
if ((callp->sy_narg & SYF_MPSAFE) == 0)
mtx_unlock(&Giant);
userret(td, framep, sticks);
userret(td, tf, sticks);
#ifdef KTRACE
if (KTRPOINT(td, KTR_SYSRET))
ktrsysret(code, error, td->td_retval[0]);
#endif
/*
* This works because errno is findable through the
* register set. If we ever support an emulation where this
@ -899,10 +1026,13 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
#ifdef DIAGNOSTIC
cred_free_thread(td);
#endif
WITNESS_WARN(WARN_PANIC, NULL, "System call %s returning",
(code >= 0 && code < SYS_MAXSYSCALL) ? syscallnames[code] : "???");
mtx_assert(&sched_lock, MA_NOTOWNED);
mtx_assert(&Giant, MA_NOTOWNED);
return (error);
}
#include <i386/include/psl.h>
@ -933,9 +1063,9 @@ ia32_syscall(struct trapframe *framep)
td->td_frame = framep;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
params = (caddr_t)(framep->tf_r[FRAME_SP] & ((1L<<32)-1))
+ sizeof(u_int32_t);
code = framep->tf_r[FRAME_R8]; /* eax */
params = (caddr_t)(framep->tf_special.sp & ((1L<<32)-1))
+ sizeof(u_int32_t);
code = framep->tf_scratch.gr8; /* eax */
orig_eflags = ia64_get_eflag();
if (p->p_sysent->sv_prepsyscall) {
@ -1001,7 +1131,7 @@ ia32_syscall(struct trapframe *framep)
if (error == 0) {
td->td_retval[0] = 0;
td->td_retval[1] = framep->tf_r[FRAME_R10]; /* edx */
td->td_retval[1] = framep->tf_scratch.gr10; /* edx */
STOPEVENT(p, S_SCE, narg);
@ -1010,8 +1140,8 @@ ia32_syscall(struct trapframe *framep)
switch (error) {
case 0:
framep->tf_r[FRAME_R8] = td->td_retval[0]; /* eax */
framep->tf_r[FRAME_R10] = td->td_retval[1]; /* edx */
framep->tf_scratch.gr8 = td->td_retval[0]; /* eax */
framep->tf_scratch.gr10 = td->td_retval[1]; /* edx */
ia64_set_eflag(ia64_get_eflag() & ~PSL_C);
break;
@ -1020,7 +1150,7 @@ ia32_syscall(struct trapframe *framep)
* Reconstruct pc, assuming lcall $X,y is 7 bytes,
* int 0x80 is 2 bytes. XXX Assume int 0x80.
*/
framep->tf_cr_iip -= 2;
framep->tf_special.iip -= 2;
break;
case EJUSTRETURN:
@ -1033,7 +1163,7 @@ ia32_syscall(struct trapframe *framep)
else
error = p->p_sysent->sv_errtbl[error];
}
framep->tf_r[FRAME_R8] = error;
framep->tf_scratch.gr8 = error;
ia64_set_eflag(ia64_get_eflag() | PSL_C);
break;
}

View File

@ -34,7 +34,6 @@
#include <vm/vm_extern.h>
#include <machine/frame.h>
#include <machine/inst.h>
#include <machine/rse.h>
#define sign_extend(imm, w) (((int64_t)(imm) << (64 - (w))) >> (64 - (w)))
@ -153,16 +152,45 @@ static int
read_register(struct trapframe *framep, struct thread *td,
int reg, u_int64_t *valuep)
{
if (reg == 0) {
*valuep = 0;
return 0;
} else if (reg < 32) {
*valuep = framep->tf_r[reg - 1];
return 0;
if (reg < 32) {
switch (reg) {
case 0: *valuep = 0; break;
case 1: *valuep = framep->tf_special.gp; break;
case 2: *valuep = framep->tf_scratch.gr2; break;
case 3: *valuep = framep->tf_scratch.gr3; break;
case 8: *valuep = framep->tf_scratch.gr8; break;
case 9: *valuep = framep->tf_scratch.gr9; break;
case 10: *valuep = framep->tf_scratch.gr10; break;
case 11: *valuep = framep->tf_scratch.gr11; break;
case 12: *valuep = framep->tf_special.sp; break;
case 13: *valuep = framep->tf_special.tp; break;
case 14: *valuep = framep->tf_scratch.gr14; break;
case 15: *valuep = framep->tf_scratch.gr15; break;
case 16: *valuep = framep->tf_scratch.gr16; break;
case 17: *valuep = framep->tf_scratch.gr17; break;
case 18: *valuep = framep->tf_scratch.gr18; break;
case 19: *valuep = framep->tf_scratch.gr19; break;
case 20: *valuep = framep->tf_scratch.gr20; break;
case 21: *valuep = framep->tf_scratch.gr21; break;
case 22: *valuep = framep->tf_scratch.gr22; break;
case 23: *valuep = framep->tf_scratch.gr23; break;
case 24: *valuep = framep->tf_scratch.gr24; break;
case 25: *valuep = framep->tf_scratch.gr25; break;
case 26: *valuep = framep->tf_scratch.gr26; break;
case 27: *valuep = framep->tf_scratch.gr27; break;
case 28: *valuep = framep->tf_scratch.gr28; break;
case 29: *valuep = framep->tf_scratch.gr29; break;
case 30: *valuep = framep->tf_scratch.gr30; break;
case 31: *valuep = framep->tf_scratch.gr31; break;
default:
return (EINVAL);
}
} else {
u_int64_t cfm = framep->tf_cr_ifs;
u_int64_t *bsp = (u_int64_t *) (td->td_kstack
+ framep->tf_ndirty);
#if 0
u_int64_t cfm = framep->tf_special.cfm;
u_int64_t *bsp = (u_int64_t *)(td->td_kstack +
framep->tf_ndirty);
int sof = cfm & 0x7f;
int sor = 8*((cfm >> 14) & 15);
int rrb_gr = (cfm >> 18) & 0x7f;
@ -182,23 +210,54 @@ read_register(struct trapframe *framep, struct thread *td,
}
*valuep = *ia64_rse_register_address(bsp, reg);
return 0;
return (0);
#else
return (EINVAL);
#endif
}
return EINVAL;
return (0);
}
static int
write_register(struct trapframe *framep, struct thread *td,
int reg, u_int64_t value)
{
if (reg == 0) {
return EINVAL; /* can't happen */
} else if (reg < 32) {
framep->tf_r[reg - 1] = value;
return 0;
if (reg < 32) {
switch (reg) {
case 1: framep->tf_special.gp = value; break;
case 2: framep->tf_scratch.gr2 = value; break;
case 3: framep->tf_scratch.gr3 = value; break;
case 8: framep->tf_scratch.gr8 = value; break;
case 9: framep->tf_scratch.gr9 = value; break;
case 10: framep->tf_scratch.gr10 = value; break;
case 11: framep->tf_scratch.gr11 = value; break;
case 12: framep->tf_special.sp = value; break;
case 13: framep->tf_special.tp = value; break;
case 14: framep->tf_scratch.gr14 = value; break;
case 15: framep->tf_scratch.gr15 = value; break;
case 16: framep->tf_scratch.gr16 = value; break;
case 17: framep->tf_scratch.gr17 = value; break;
case 18: framep->tf_scratch.gr18 = value; break;
case 19: framep->tf_scratch.gr19 = value; break;
case 20: framep->tf_scratch.gr20 = value; break;
case 21: framep->tf_scratch.gr21 = value; break;
case 22: framep->tf_scratch.gr22 = value; break;
case 23: framep->tf_scratch.gr23 = value; break;
case 24: framep->tf_scratch.gr24 = value; break;
case 25: framep->tf_scratch.gr25 = value; break;
case 26: framep->tf_scratch.gr26 = value; break;
case 27: framep->tf_scratch.gr27 = value; break;
case 28: framep->tf_scratch.gr28 = value; break;
case 29: framep->tf_scratch.gr29 = value; break;
case 30: framep->tf_scratch.gr30 = value; break;
case 31: framep->tf_scratch.gr31 = value; break;
default:
return (EINVAL);
}
} else {
u_int64_t cfm = framep->tf_cr_ifs;
#if 0
u_int64_t cfm = framep->tf_special.cfm;
u_int64_t *bsp = (u_int64_t *) (td->td_kstack
+ framep->tf_ndirty);
int sof = cfm & 0x7f;
@ -221,9 +280,11 @@ write_register(struct trapframe *framep, struct thread *td,
*ia64_rse_register_address(bsp, reg) = value;
return 0;
#else
return (EINVAL);
#endif
}
return EINVAL;
return (0);
}
/*
@ -367,7 +428,7 @@ invala_e(int reg)
int
unaligned_fixup(struct trapframe *framep, struct thread *td)
{
vm_offset_t va = framep->tf_cr_ifa;
vm_offset_t va = framep->tf_special.ifa;
int doprint, dofix, dosigbus;
int signal, size = 0;
unsigned long uac;
@ -399,7 +460,7 @@ unaligned_fixup(struct trapframe *framep, struct thread *td)
* If psr.ac is set, then clearly the user program *wants* to
* fault.
*/
if (framep->tf_cr_ipsr & IA64_PSR_AC) {
if (framep->tf_special.psr & IA64_PSR_AC) {
dofix = 0;
dosigbus = 1;
}
@ -419,10 +480,10 @@ unaligned_fixup(struct trapframe *framep, struct thread *td)
* offending instruction.
* XXX assume that the instruction is in an 'M' slot.
*/
copyin((const void *) framep->tf_cr_iip, &low, 8);
copyin((const void *) (framep->tf_cr_iip + 8), &high, 8);
copyin((const void *) framep->tf_special.iip, &low, 8);
copyin((const void *) (framep->tf_special.iip + 8), &high, 8);
ia64_unpack_bundle(low, high, &b);
slot = (framep->tf_cr_ipsr >> 41) & 3;
slot = (framep->tf_special.psr >> 41) & 3;
ins.ins = b.slot[slot];
decoded = 0;
@ -451,7 +512,7 @@ unaligned_fixup(struct trapframe *framep, struct thread *td)
*/
if (doprint) {
uprintf("pid %d (%s): unaligned access: va=0x%lx pc=0x%lx",
p->p_pid, p->p_comm, va, framep->tf_cr_iip);
p->p_pid, p->p_comm, va, framep->tf_special.iip);
if (decoded) {
uprintf(" op=");
if (dec.isload) {
@ -500,7 +561,7 @@ unaligned_fixup(struct trapframe *framep, struct thread *td)
*/
__asm __volatile("flushrs");
isr = framep->tf_cr_isr;
isr = framep->tf_special.isr;
error = read_register(framep, td, dec.basereg, &addr);
if (error) {
signal = SIGBUS;
@ -565,12 +626,12 @@ unaligned_fixup(struct trapframe *framep, struct thread *td)
* Advance to the instruction following the
* one which faulted.
*/
if ((framep->tf_cr_ipsr & IA64_PSR_RI)
if ((framep->tf_special.psr & IA64_PSR_RI)
== IA64_PSR_RI_2) {
framep->tf_cr_ipsr &= ~IA64_PSR_RI;
framep->tf_cr_iip += 16;
framep->tf_special.psr &= ~IA64_PSR_RI;
framep->tf_special.iip += 16;
} else {
framep->tf_cr_ipsr += IA64_PSR_RI_1;
framep->tf_special.psr += IA64_PSR_RI_1;
}
}
} else {

File diff suppressed because it is too large Load Diff

View File

@ -127,170 +127,67 @@ cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
* ready to run and return to user mode.
*/
void
cpu_fork(td1, p2, td2, flags)
register struct thread *td1;
register struct proc *p2;
register struct thread *td2;
int flags;
cpu_fork(struct thread *td1, struct proc *p2 __unused, struct thread *td2,
int flags)
{
struct proc *p1;
struct trapframe *p2tf;
u_int64_t bspstore, *p1bs, *p2bs, rnatloc, rnat;
char *stackp;
KASSERT(td1 == curthread || td1 == &thread0,
("cpu_fork: p1 not curproc and not proc0"));
("cpu_fork: td1 not curthread and not thread0"));
if ((flags & RFPROC) == 0)
return;
p1 = td1->td_proc;
td2->td_pcb = (struct pcb *)
(td2->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
td2->td_md.md_flags = td1->td_md.md_flags & (MDP_FPUSED | MDP_UAC_MASK);
/*
* Save the preserved registers and the high FP registers in the
* PCB if we're the parent (ie td1 == curthread) so that we have
* a valid PCB. This also causes a RSE flush. We don't have to
* do that otherwise, because there wouldn't be anything important
* to save.
*/
if (td1 == curthread) {
if (savectx(td1->td_pcb) != 0)
panic("unexpected return from savectx()");
ia64_highfp_save(td1);
}
/*
* Copy floating point state from the FP chip to the PCB
* if this process has state stored there.
* create the child's kernel stack and backing store. We basicly
* create an image of the parent's stack and backing store and
* adjust where necessary.
*/
ia64_fpstate_save(td1, 0);
stackp = (char *)(td2->td_kstack + KSTACK_PAGES * PAGE_SIZE);
/*
* Copy pcb and stack from proc p1 to p2. We do this as
* cheaply as possible, copying only the active part of the
* stack. The stack and pcb need to agree. Make sure that the
* new process has FEN disabled.
*/
stackp -= sizeof(struct pcb);
td2->td_pcb = (struct pcb *)stackp;
bcopy(td1->td_pcb, td2->td_pcb, sizeof(struct pcb));
/*
* Set the floating point state.
*/
#if 0
if ((td2->td_pcb->pcb_fp_control & IEEE_INHERIT) == 0) {
td2->td_pcb->pcb_fp_control = 0;
td2->td_pcb->pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
| FPCR_INVD | FPCR_DZED
| FPCR_OVFD | FPCR_INED
| FPCR_UNFD);
}
#endif
/*
* Arrange for a non-local goto when the new process
* is started, to resume here, returning nonzero from setjmp.
*/
#ifdef DIAGNOSTIC
if (td1 == curthread)
ia64_fpstate_check(td1);
#endif
/*
* create the child's kernel stack, from scratch.
*
* Pick a stack pointer, leaving room for a trapframe;
* copy trapframe from parent so return to user mode
* will be to right address, with correct registers. Clear the
* high-fp enable for the new process so that it is forced to
* load its state from the pcb.
*/
td2->td_frame = (struct trapframe *)td2->td_pcb - 1;
stackp -= sizeof(struct trapframe);
td2->td_frame = (struct trapframe *)stackp;
bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));
td2->td_frame->tf_cr_ipsr |= IA64_PSR_DFH;
td2->td_frame->tf_length = sizeof(struct trapframe);
/*
* Set up return-value registers as fork() libc stub expects.
*/
p2tf = td2->td_frame;
if (p2tf->tf_cr_ipsr & IA64_PSR_IS) {
p2tf->tf_r[FRAME_R8] = 0; /* child returns zero (eax) */
p2tf->tf_r[FRAME_R10] = 1; /* is child (edx) */
td2->td_pcb->pcb_ar_eflag &= ~PSL_C; /* no error */
bcopy((void*)td1->td_kstack, (void*)td2->td_kstack,
td2->td_frame->tf_special.ndirty);
/* Set-up the return values as expected by the fork() libc stub. */
if (td2->td_frame->tf_special.psr & IA64_PSR_IS) {
td2->td_frame->tf_scratch.gr8 = 0;
td2->td_frame->tf_scratch.gr10 = 1;
} else {
p2tf->tf_r[FRAME_R8] = 0; /* child's pid (linux) */
p2tf->tf_r[FRAME_R9] = 1; /* is child (FreeBSD) */
p2tf->tf_r[FRAME_R10] = 0; /* no error */
td2->td_frame->tf_scratch.gr8 = 0;
td2->td_frame->tf_scratch.gr9 = 1;
td2->td_frame->tf_scratch.gr10 = 0;
}
/*
* Turn off RSE for a moment and work out our current
* ar.bspstore. This assumes that td1==curthread. Also
* flush dirty regs to ensure that the user's stacked
* regs are written out to backing store.
*
* We could cope with td1!=curthread by digging values
* out of its PCB but I don't see the point since
* current usage only allows &thread0 when creating kernel
* threads and &thread0 doesn't have any dirty regs.
*/
td2->td_pcb->pcb_special.bspstore = td2->td_kstack +
td2->td_frame->tf_special.ndirty;
td2->td_pcb->pcb_special.pfs = 0;
td2->td_pcb->pcb_current_pmap = vmspace_pmap(td2->td_proc->p_vmspace);
p1bs = (u_int64_t *)td1->td_kstack;
p2bs = (u_int64_t *)td2->td_kstack;
if (td1 == curthread) {
__asm __volatile("mov ar.rsc=0;;");
__asm __volatile("flushrs;;" ::: "memory");
__asm __volatile("mov %0=ar.bspstore" : "=r"(bspstore));
} else {
bspstore = (u_int64_t) p1bs;
}
/*
* Copy enough of td1's backing store to include all
* the user's stacked regs.
*/
bcopy(p1bs, p2bs, td1->td_frame->tf_ndirty);
/*
* To calculate the ar.rnat for td2, we need to decide
* if td1's ar.bspstore has advanced past the place
* where the last ar.rnat which covers the user's
* saved registers would be placed. If so, we read
* that one from memory, otherwise we take td1's
* current ar.rnat. If we are simply spawning a new kthread
* from &thread0 we don't care about ar.rnat.
*/
if (td1 == curthread) {
rnatloc = (u_int64_t)p1bs + td1->td_frame->tf_ndirty;
rnatloc |= 0x1f8;
if (bspstore > rnatloc)
rnat = *(u_int64_t *) rnatloc;
else
__asm __volatile("mov %0=ar.rnat;;" : "=r"(rnat));
/*
* Switch the RSE back on.
*/
__asm __volatile("mov ar.rsc=3;;");
} else {
rnat = 0;
}
/*
* Setup the child's pcb so that its ar.bspstore
* starts just above the region which we copied. This
* should work since the child will normally return
* straight into exception_restore. Also initialise its
* pmap to the containing proc's vmspace.
*/
td2->td_pcb->pcb_ar_bsp = (u_int64_t)p2bs + td1->td_frame->tf_ndirty;
td2->td_pcb->pcb_ar_rnat = rnat;
td2->td_pcb->pcb_ar_pfs = 0;
td2->td_pcb->pcb_current_pmap = (u_int64_t)
vmspace_pmap(td2->td_proc->p_vmspace);
/*
* Arrange for continuation at fork_return(), which
* will return to exception_restore(). Note that the
* child process doesn't stay in the kernel for long!
*
* The extra 16 bytes subtracted from sp is part of the ia64
* ABI - a function can assume that the 16 bytes above sp are
* available as scratch space.
*/
td2->td_pcb->pcb_sp = (u_int64_t)p2tf - 16;
td2->td_pcb->pcb_r[PCB_R4] = (u_int64_t)fork_return;
td2->td_pcb->pcb_r[PCB_R5] = FDESC_FUNC(exception_restore);
td2->td_pcb->pcb_r[PCB_R6] = (u_int64_t)td2;
td2->td_pcb->pcb_rp = FDESC_FUNC(fork_trampoline);
td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);
}
/*
@ -305,8 +202,8 @@ cpu_set_fork_handler(td, func, arg)
void (*func)(void *);
void *arg;
{
td->td_pcb->pcb_r[PCB_R4] = (u_int64_t) func;
td->td_pcb->pcb_r[PCB_R6] = (u_int64_t) arg;
td->td_frame->tf_scratch.gr2 = (u_int64_t)func;
td->td_frame->tf_scratch.gr3 = (u_int64_t)arg;
}
/*
@ -315,11 +212,11 @@ cpu_set_fork_handler(td, func, arg)
* When the proc is reaped, cpu_wait() will gc the VM state.
*/
void
cpu_exit(td)
register struct thread *td;
cpu_exit(struct thread *td)
{
ia64_fpstate_drop(td);
/* Throw away the high FP registers. */
ia64_highfp_drop(td);
}
void

View File

@ -151,15 +151,22 @@ _name_ = _value_
label: ASCIZ msg; \
.text;
/*
* System call glue.
*/
#define SYSCALLNUM(name) \
SYS_ ## name
#define SYSCALLNUM(name) SYS_ ## name
#define CALLSYS_NOERROR(name) \
{ .mmi ; \
alloc r9 = ar.pfs, 0, 0, 8, 0 ; \
mov r31 = ar.k5 ; \
mov r10 = b0 ;; } \
{ .mib ; \
mov r8 = SYSCALLNUM(name) ; \
mov b7 = r31 ; \
br.call.sptk b0 = b7 ;; }
#define CALLSYS_NOERROR(name) \
mov r15=SYSCALLNUM(name); \
break 0x100000 ;;
/*
* WEAK_ALIAS: create a weak alias (ELF only).

View File

@ -46,28 +46,25 @@
#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_
/*
* Exported definitions unique to Alpha cpu support.
*/
#include <machine/frame.h>
#define cpu_getstack(td) ((td)->td_frame->tf_r[FRAME_SP])
/*
* Arguments to hardclock and gatherstats encapsulate the previous
* machine state in an opaque clockframe. One the Alpha, we use
* what we push on an interrupt (a trapframe).
* Arguments to hardclock and gatherstats encapsulate the previous machine
* state in an opaque clockframe.
*/
struct clockframe {
struct trapframe cf_tf;
struct trapframe cf_tf;
};
#define TRAPF_USERMODE(framep) \
(((framep)->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER)
#define TRAPF_PC(framep) ((framep)->tf_cr_iip)
#define CLKF_PC(cf) ((cf)->cf_tf.tf_special.iip)
#define CLKF_USERMODE(cf) ((CLKF_PC(cf) >> 61) < 5)
#define CLKF_USERMODE(framep) TRAPF_USERMODE(&(framep)->cf_tf)
#define CLKF_PC(framep) TRAPF_PC(&(framep)->cf_tf)
/* Used by signaling code. */
#define cpu_getstack(td) ((td)->td_frame->tf_special.sp)
/* XXX */
#define TRAPF_PC(tf) ((tf)->tf_special.iip)
#define TRAPF_USERMODE(framep) \
(((framep)->tf_special.psr & IA64_PSR_CPL) == IA64_PSR_CPL_USER)
/*
* CTL_MACHDEP definitions.
@ -107,34 +104,33 @@ struct trapframe;
extern struct rpb *hwrpb;
extern volatile int mc_expected, mc_received;
int badaddr (void *, size_t);
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, void *);
u_int64_t console_restart(u_int64_t, u_int64_t, u_int64_t);
void do_sir(void);
int do_ast(struct trapframe *);
void dumpconf(void);
void exception_restore(void); /* MAGIC */
void frametoreg(struct trapframe *, struct reg *);
long fswintrberr(void); /* MAGIC */
int ia64_pa_access(u_long);
int ia64_highfp_drop(struct thread *);
int ia64_highfp_load(struct thread *);
int ia64_highfp_save(struct thread *);
void ia64_init(u_int64_t, u_int64_t);
void ia64_fpstate_check(struct thread *p);
void ia64_fpstate_save(struct thread *p, int write);
void ia64_fpstate_drop(struct thread *p);
void ia64_fpstate_switch(struct thread *p);
int ia64_pa_access(u_long);
void init_prom_interface(struct rpb*);
void interrupt(u_int64_t, struct trapframe *);
void machine_check
(unsigned long, struct trapframe *, unsigned long, unsigned long);
void machine_check(unsigned long, struct trapframe *, unsigned long,
unsigned long);
u_int64_t hwrpb_checksum(void);
void hwrpb_restart_setup(void);
void regdump(struct trapframe *);
void regtoframe(struct reg *, struct trapframe *);
void set_iointr(void (*)(void *, unsigned long));
void fork_trampoline(void); /* MAGIC */
void syscall(int, u_int64_t *, struct trapframe *);
void trap(int vector, int imm, struct trapframe *framep);
int syscall(struct trapframe *);
void trap(int vector, struct trapframe *framep);
void ia64_probe_sapics(void);
int ia64_count_cpus(void);
void map_gateway_page(void);
void map_pal_code(void);
void map_port_space(void);
void cpu_mp_add(uint, uint, uint);

View File

@ -50,8 +50,8 @@ typedef struct trapframe db_regs_t;
extern db_regs_t ddb_regs; /* register state */
#define DDB_REGS (&ddb_regs)
#define PC_REGS(regs) ((db_addr_t)(regs)->tf_cr_iip \
+ (((regs)->tf_cr_ipsr >> 41) & 3))
#define PC_REGS(regs) ((db_addr_t)(regs)->tf_special.iip + \
(((regs)->tf_special.psr >> 41) & 3))
#define BKPT_WRITE(addr, storage) db_write_breakpoint(addr, storage)
#define BKPT_CLEAR(addr, storage) db_clear_breakpoint(addr, storage)
@ -59,8 +59,8 @@ extern db_regs_t ddb_regs; /* register state */
#define BKPT_SKIP db_skip_breakpoint()
#define db_clear_single_step(regs) ddb_regs.tf_cr_ipsr &= ~IA64_PSR_SS
#define db_set_single_step(regs) ddb_regs.tf_cr_ipsr |= IA64_PSR_SS
#define db_clear_single_step(regs) ddb_regs.tf_special.psr &= ~IA64_PSR_SS
#define db_set_single_step(regs) ddb_regs.tf_special.psr |= IA64_PSR_SS
#define IS_BREAKPOINT_TRAP(type, code) (type == IA64_VEC_BREAK)
#define IS_WATCHPOINT_TRAP(type, code) 0

View File

@ -29,87 +29,18 @@
#ifndef _MACHINE_FRAME_H_
#define _MACHINE_FRAME_H_
#include <machine/reg.h>
#include <machine/_regset.h>
/*
* Software trap, exception, and syscall frame.
*/
struct trapframe {
u_int64_t tf_flags;
uint64_t tf_length;
uint64_t tf_flags;
#define FRAME_SYSCALL 1 /* syscalls use a partial trapframe */
u_int64_t tf_cr_iip;
u_int64_t tf_cr_ipsr;
u_int64_t tf_cr_isr;
u_int64_t tf_cr_ifa;
u_int64_t tf_pr;
u_int64_t tf_ar_rsc;
u_int64_t tf_ar_pfs;
u_int64_t tf_cr_ifs;
u_int64_t tf_ar_bspstore;
u_int64_t tf_ar_rnat;
u_int64_t tf_ndirty;
u_int64_t tf_ar_unat;
u_int64_t tf_ar_ccv;
u_int64_t tf_ar_fpsr;
u_int64_t tf_ar_lc;
u_int64_t tf_ar_ec;
u_int64_t tf_b[8];
u_int64_t tf_r[31]; /* don't need to save r0 */
#define FRAME_R1 0
#define FRAME_R2 1
#define FRAME_R3 2
#define FRAME_R4 3
#define FRAME_R5 4
#define FRAME_R6 5
#define FRAME_R7 6
#define FRAME_R8 7
#define FRAME_R9 8
#define FRAME_R10 9
#define FRAME_R11 10
#define FRAME_R12 11
#define FRAME_R13 12
#define FRAME_R14 13
#define FRAME_R15 14
#define FRAME_R16 15
#define FRAME_R17 16
#define FRAME_R18 17
#define FRAME_R19 18
#define FRAME_R20 19
#define FRAME_R21 20
#define FRAME_R22 21
#define FRAME_R23 22
#define FRAME_R24 23
#define FRAME_R25 24
#define FRAME_R26 25
#define FRAME_R27 26
#define FRAME_R28 27
#define FRAME_R29 28
#define FRAME_R30 29
#define FRAME_R31 30
#define FRAME_GP FRAME_R1
#define FRAME_SP FRAME_R12
#define FRAME_TP FRAME_R13
/*
* We rely on the compiler to save/restore f2-f5 and
* f16-f31. We also tell the compiler to avoid f32-f127
* completely so we don't worry about them at all.
*/
struct ia64_fpreg tf_f[10];
#define FRAME_F6 0
#define FRAME_F7 1
#define FRAME_F8 2
#define FRAME_F9 3
#define FRAME_F10 4
#define FRAME_F11 5
#define FRAME_F12 6
#define FRAME_F13 7
#define FRAME_F14 8
#define FRAME_F15 9
struct _special tf_special;
struct _caller_saved tf_scratch;
struct _caller_saved_fp tf_scratch_fp;
};
#endif /* _MACHINE_FRAME_H_ */

View File

@ -1,4 +1,5 @@
/*-
* Copyright (c) 2003 Doug Rabson
* Copyright (c) 2000 Doug Rabson
* All rights reserved.
*
@ -29,72 +30,44 @@
#ifndef _MACHINE_PCB_H_
#define _MACHINE_PCB_H_
#include <machine/_regset.h>
/*
* PCB: process control block
*/
struct pmap;
struct pcb {
uint64_t pcb_sp;
uint64_t pcb_ar_unat;
uint64_t pcb_rp;
uint64_t pcb_pr;
struct ia64_fpreg pcb_f[20];
#define PCB_F2 0
#define PCB_F3 1
#define PCB_F4 2
#define PCB_F5 3
#define PCB_F16 4
#define PCB_F17 5
#define PCB_F18 6
#define PCB_F19 7
#define PCB_F20 8
#define PCB_F21 9
#define PCB_F22 10
#define PCB_F23 11
#define PCB_F24 12
#define PCB_F25 13
#define PCB_F26 14
#define PCB_F27 15
#define PCB_F28 16
#define PCB_F29 17
#define PCB_F30 18
#define PCB_F31 19
uint64_t pcb_r[4];
#define PCB_R4 0
#define PCB_R5 1
#define PCB_R6 2
#define PCB_R7 3
uint64_t pcb_unat47;
uint64_t pcb_b[5];
#define PCB_B1 0
#define PCB_B2 1
#define PCB_B3 2
#define PCB_B4 3
#define PCB_B5 4
uint64_t pcb_ar_bsp;
uint64_t pcb_ar_pfs;
uint64_t pcb_ar_rnat;
uint64_t pcb_ar_lc;
uint64_t pcb_current_pmap;
uint64_t pcb_ar_fcr;
uint64_t pcb_ar_eflag;
uint64_t pcb_ar_csd;
uint64_t pcb_ar_ssd;
uint64_t pcb_ar_fsr;
uint64_t pcb_ar_fir;
uint64_t pcb_ar_fdr;
/* Aligned! */
struct ia64_fpreg pcb_highfp[96]; /* f32-f127 */
struct _special pcb_special;
struct _callee_saved pcb_preserved;
struct _callee_saved_fp pcb_preserved_fp;
struct _high_fp pcb_high_fp;
struct pcpu *pcb_fpcpu;
struct pmap *pcb_current_pmap;
uint64_t pcb_onfault; /* for copy faults */
uint64_t pcb_accessaddr; /* for [fs]uswintr */
#if IA32
uint64_t pcb_ia32_cflg;
uint64_t pcb_ia32_eflag;
uint64_t pcb_ia32_fcr;
uint64_t pcb_ia32_fdr;
uint64_t pcb_ia32_fir;
uint64_t pcb_ia32_fsr;
#endif
};
#ifdef _KERNEL
void restorectx(struct pcb *);
void savectx(struct pcb *);
#define savectx(p) swapctx(p, NULL)
void restorectx(struct pcb *) __dead2;
int swapctx(struct pcb *old, struct pcb *new);
#if IA32
void ia32_restorectx(struct pcb *);
void ia32_savectx(struct pcb *);
#endif
#endif
#endif /* _MACHINE_PCB_H_ */

View File

@ -37,7 +37,6 @@
struct mdthread {
u_long md_flags;
void *md_kstackvirt; /* virtual address of td_kstack */
vm_offset_t md_bspstore; /* initial ar.bspstore */
register_t md_savecrit;
};
@ -50,7 +49,7 @@ struct mdthread {
#define MDP_UAC_MASK (MDP_UAC_NOPRINT | MDP_UAC_NOFIX | MDP_UAC_SIGBUS)
struct mdproc {
struct user *md_uservirt; /* virtual address of p_addr */
int __dummy; /* Avoid having an empty struct. */
};
#endif /* !_MACHINE_PROC_H_ */

View File

@ -29,37 +29,18 @@
#ifndef _MACHINE_REG_H_
#define _MACHINE_REG_H_
#ifndef _IA64_FPREG_DEFINED
struct ia64_fpreg {
uint64_t fpr_bits[2];
} __aligned(16);
#define _IA64_FPREG_DEFINED
#endif
#include <machine/_regset.h>
struct reg {
uint64_t r_gr[128];
uint64_t r_br[8];
uint64_t r_cfm;
uint64_t r_ip; /* Bits 0-3 encode the slot number */
uint64_t r_pr;
uint64_t r_psr; /* User mask */
uint64_t r_ar_rsc;
uint64_t r_ar_bsp;
uint64_t r_ar_bspstore;
uint64_t r_ar_rnat;
uint64_t r_ar_ccv;
uint64_t r_ar_unat;
uint64_t r_ar_fpsr;
uint64_t r_ar_pfs;
uint64_t r_ar_lc;
uint64_t r_ar_ec;
struct _special r_special;
struct _callee_saved r_preserved;
struct _caller_saved r_scratch;
};
struct fpreg {
struct ia64_fpreg fpr_regs[128];
struct _callee_saved_fp fpr_preserved;
struct _caller_saved_fp fpr_scratch;
struct _high_fp fpr_high;
};
struct dbreg {
@ -68,15 +49,9 @@ struct dbreg {
};
#ifdef _KERNEL
struct thread;
void restorehighfp(struct ia64_fpreg *);
void savehighfp(struct ia64_fpreg *);
/*
* XXX these interfaces are MI, so they should be declared in a MI place.
*/
/* XXX these interfaces are MI, so they should be declared in a MI place. */
int fill_regs(struct thread *, struct reg *);
int set_regs(struct thread *, struct reg *);
int fill_fpregs(struct thread *, struct fpreg *);

View File

@ -51,62 +51,35 @@ typedef long sig_atomic_t;
#endif
#if __XSI_VISIBLE
/*
* Minimum signal stack size. The current signal frame
* for IA-64 is 2656 bytes large.
*/
/* Minimum signal stack size. */
#define MINSIGSTKSZ (3072 * 4)
#endif
#if __BSD_VISIBLE
#ifndef _IA64_FPREG_DEFINED
struct ia64_fpreg {
unsigned long fpr_bits[2];
} __aligned(16);
#define _IA64_FPREG_DEFINED
#endif
#endif
/*
* Information pushed on stack when a signal is delivered.
* This is used by the kernel to restore state following
* execution of the signal handler. It is also made available
* to the handler to allow it to restore state properly if
* a non-standard exit is performed.
*
* Note that sc_regs[] and sc_fpregs[]+sc_fpcr are inline
* representations of 'struct reg' and 'struct fpreg', respectively.
*/
#if __BSD_VISIBLE
#include <machine/_regset.h>
/*
* The sequence of the fields should match those in
* mcontext_t. Keep them in sync!
*/
struct sigcontext {
struct __sigset sc_mask; /* signal mask to restore */
unsigned long sc_onstack;
unsigned long sc_flags;
unsigned long sc_nat;
unsigned long sc_sp;
unsigned long sc_ip;
unsigned long sc_cfm;
unsigned long sc_um;
unsigned long sc_ar_rsc;
unsigned long sc_ar_bsp;
unsigned long sc_ar_rnat;
unsigned long sc_ar_ccv;
unsigned long sc_ar_unat;
unsigned long sc_ar_fpsr;
unsigned long sc_ar_pfs;
unsigned long sc_pr;
unsigned long sc_br[8];
unsigned long sc_gr[32];
struct ia64_fpreg sc_fr[128];
struct __sigset sc_mask; /* signal mask to restore */
unsigned long sc_onstack;
unsigned long sc_flags;
struct _special sc_special;
struct _callee_saved sc_preserved;
struct _callee_saved_fp sc_preserved_fp;
struct _caller_saved sc_scratch;
struct _caller_saved_fp sc_scratch_fp;
struct _high_fp sc_high_fp;
};
#endif /* __BSD_VISIBLE */

View File

@ -14,15 +14,16 @@
*/
/* Architecture specific IPIs. */
#define IPI_AP_WAKEUP 0
#define IPI_MCA_RENDEZ 1
#define IPI_HIGH_FP 1
#define IPI_MCA_CMCV 2
#define IPI_TEST 3
#define IPI_MCA_RENDEZ 3
#define IPI_TEST 4
/* Machine independent IPIs. */
#define IPI_AST 4
#define IPI_RENDEZVOUS 5
#define IPI_STOP 6
#define IPI_AST 5
#define IPI_RENDEZVOUS 6
#define IPI_STOP 7
#define IPI_COUNT 7
#define IPI_COUNT 8
#ifndef LOCORE
@ -32,6 +33,7 @@ void ipi_all(int ipi);
void ipi_all_but_self(int ipi);
void ipi_selected(u_int64_t cpus, int ipi);
void ipi_self(int ipi);
void ipi_send(u_int64_t lid, int ipi);
#endif /* !LOCORE */
#endif /* _KERNEL */

View File

@ -31,35 +31,19 @@
#ifndef _MACHINE_UCONTEXT_H_
#define _MACHINE_UCONTEXT_H_
#define IA64_MC_FLAG_ONSTACK 0
#define IA64_MC_FLAG_IN_SYSCALL 1
#define IA64_MC_FLAG_FPH_VALID 2
#include <machine/_regset.h>
typedef struct __mcontext {
/*
* These fields must match the definition
* of struct sigcontext. That way we can support
* struct sigcontext and ucontext_t at the same
* time.
*/
long mc_onstack; /* XXX - sigcontext compat. */
unsigned long mc_flags;
unsigned long mc_nat;
unsigned long mc_sp;
unsigned long mc_ip;
unsigned long mc_cfm;
unsigned long mc_um;
unsigned long mc_ar_rsc;
unsigned long mc_ar_bsp;
unsigned long mc_ar_rnat;
unsigned long mc_ar_ccv;
unsigned long mc_ar_unat;
unsigned long mc_ar_fpsr;
unsigned long mc_ar_pfs;
unsigned long mc_pr;
unsigned long mc_br[8];
unsigned long mc_gr[32];
struct ia64_fpreg mc_fr[128];
uint64_t mc_flags;
#define IA64_MC_FLAGS_SCRATCH_VALID 1
#define IA64_MC_FLAGS_HIGHFP_VALID 2
uint64_t _reserved_;
struct _special mc_special;
struct _callee_saved mc_preserved;
struct _callee_saved_fp mc_preserved_fp;
struct _caller_saved mc_scratch;
struct _caller_saved_fp mc_scratch_fp;
struct _high_fp mc_high_fp;
} mcontext_t;
#endif /* !_MACHINE_UCONTEXT_H_ */

View File

@ -26,13 +26,25 @@
* $FreeBSD$
*/
int ia64_add_unwind_table(vm_offset_t, vm_offset_t, vm_offset_t);
void ia64_delete_unwind_table(vm_offset_t);
#ifndef _MACHINE_UNWIND_H_
#define _MACHINE_UNWIND_H_
struct ia64_unwind_state *ia64_create_unwind_state(struct trapframe *framep);
void ia64_free_unwind_state(struct ia64_unwind_state *us);
u_int64_t ia64_unwind_state_get_ip(struct ia64_unwind_state *us);
u_int64_t ia64_unwind_state_get_sp(struct ia64_unwind_state *us);
u_int64_t ia64_unwind_state_get_cfm(struct ia64_unwind_state *us);
u_int64_t *ia64_unwind_state_get_bsp(struct ia64_unwind_state *us);
int ia64_unwind_state_previous_frame(struct ia64_unwind_state *us);
struct uwx_env;
struct unw_regstate {
struct trapframe *frame;
struct uwx_env *env;
uint64_t keyval[8];
};
int unw_create(struct unw_regstate *s, struct trapframe *tf);
int unw_step(struct unw_regstate *s);
int unw_get_bsp(struct unw_regstate *s, uint64_t *r);
int unw_get_cfm(struct unw_regstate *s, uint64_t *r);
int unw_get_ip(struct unw_regstate *s, uint64_t *r);
int unw_table_add(uint64_t, uint64_t, uint64_t);
void unw_table_remove(uint64_t);
#endif /* _MACHINE_UNWIND_H_ */

View File

@ -50,11 +50,10 @@
/*
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. Immediately above the user stack
* resides the user structure, which is UPAGES long and contains the
* kernel stack.
* resides the syscall gateway page.
*/
#define USRTEXT CLBYTES
#define USRSTACK VM_MAXUSER_ADDRESS
#define USRSTACK VM_MAX_ADDRESS
/*
* Virtual memory related constants, all in bytes
@ -140,12 +139,13 @@
/* user/kernel map constants */
#define VM_MIN_ADDRESS 0
#define VM_MAXUSER_ADDRESS IA64_RR_BASE(5)
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#define VM_MIN_KERNEL_ADDRESS IA64_RR_BASE(5)
#define VM_MAX_ADDRESS IA64_RR_BASE(5)
#define VM_GATEWAY_SIZE PAGE_SIZE
#define VM_MAXUSER_ADDRESS (VM_MAX_ADDRESS + VM_GATEWAY_SIZE)
#define VM_MIN_KERNEL_ADDRESS VM_MAXUSER_ADDRESS
#define VM_MAX_KERNEL_ADDRESS (IA64_RR_BASE(6) - 1)
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
#define KERNBASE VM_MAX_ADDRESS
/* virtual sizes (bytes) for various kernel submaps */
#ifndef VM_KMEM_SIZE

View File

@ -1221,7 +1221,7 @@ thread_exit(void)
}
/* XXX Shouldn't cpu_throw() here. */
mtx_assert(&sched_lock, MA_OWNED);
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();

View File

@ -460,7 +460,7 @@ mi_switch(void)
{
struct bintime new_switchtime;
struct thread *td;
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
struct thread *newtd;
#endif
struct proc *p;
@ -518,7 +518,7 @@ mi_switch(void)
thread_switchout(td);
sched_switchout(td);
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
newtd = choosethread();
if (td != newtd)
cpu_switch(td, newtd); /* SHAZAM!! */

View File

@ -111,7 +111,7 @@ thr_exit1(void)
sched_exit_thread(TAILQ_NEXT(td, td_kglist), td);
thread_stash(td);
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();

View File

@ -1221,7 +1221,7 @@ thread_exit(void)
}
/* XXX Shouldn't cpu_throw() here. */
mtx_assert(&sched_lock, MA_OWNED);
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
cpu_throw(td, choosethread());
#else
cpu_throw();

View File

@ -860,7 +860,7 @@ int sigonstack(size_t sp);
void sleepinit(void);
void stopevent(struct proc *, u_int, u_int);
void cpu_idle(void);
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
#if !defined(__alpha__) && !defined(__powerpc__)
void cpu_switch(struct thread *old, struct thread *new);
void cpu_throw(struct thread *old, struct thread *new) __dead2;
#else

View File

@ -136,14 +136,14 @@ ia64_syscall_entry(struct trussinfo *trussinfo, int nargs) {
fprintf(trussinfo->outfile, "-- CANNOT READ REGISTERS --\n");
return;
}
parm_offset = regs.r_gr[12] + 16;
parm_offset = regs.r_special.sp + 16;
/*
* FreeBSD has two special kinds of system call redirctions --
* SYS_syscall, and SYS___syscall. The former is the old syscall()
* routine, basicly; the latter is for quad-aligned arguments.
*/
syscall_num = regs.r_gr[15];
syscall_num = regs.r_scratch.gr15; /* XXX double-check. */
switch (syscall_num) {
case SYS_syscall:
lseek(Procfd, parm_offset, SEEK_SET);
@ -293,8 +293,8 @@ ia64_syscall_exit(struct trussinfo *trussinfo, int syscall_num __unused) {
fprintf(trussinfo->outfile, "-- CANNOT READ REGISTERS --\n");
return (-1);
}
retval = regs.r_gr[8];
errorp = (regs.r_gr[10] != 0) ? 1 : 0;
retval = regs.r_scratch.gr8;
errorp = (regs.r_scratch.gr10 != 0) ? 1 : 0;
/*
* This code, while simpler than the initial versions I used, could