mirror of https://git.FreeBSD.org/src.git (synced 2024-12-14 10:09:48 +00:00)
[PowerPC] kernel ifunc support for powerpc*, fix ppc64 relocation oddities.
This is a general cleanup of the relocatable kernel support on powerpc,
needed to enable kernel ifuncs.

* Fix some relocatable issues in the kernel linker, and change to using a
  RELOCATABLE_KERNEL #define instead of #ifdef __powerpc__ for parts that
  other platforms can use in the future if they wish to have ET_DYN kernels.
* Get rid of the DB_STOFFS hack now that the kernel is relocated to the
  DMAP properly across the board on powerpc64.
* Add powerpc64 and powerpc32 ifunc functionality.
* Allow AIM64 virtual mode OF kernels to run from the DMAP like other
  AIM64, by implementing a virtual mode restart. This fixes the runtime
  address on PowerMac G5.
* Fix symbol relocation problems on post-relocation kernels by relocating
  the symbol table.
* Add an undocumented method for supplying kernel symbols on powernv and
  other powerpc machines using linux-style kernel/initrd loading -- if you
  pass the kernel in as the initrd as well, the copy resident in initrd
  will be used as a source for symbols when initializing the debugger.
  This method is subject to removal once we have a better way of doing
  this.

Approved by:	jhibbits
Relnotes:	yes
Sponsored by:	Tag1 Consulting, Inc.
Differential Revision:	https://reviews.freebsd.org/D23156
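As background (not part of this change): on the platforms that already support kernel ifuncs, an ifunc is declared with the DEFINE_IFUNC() macro from <machine/ifunc.h>, and its resolver picks an implementation once at boot. A minimal, hypothetical sketch of what this commit makes possible on powerpc follows; the zero_range name and the cpu_features/PPC_FEATURE_HAS_VSX predicate are illustrative stand-ins, not code from this revision.

    /*
     * Illustrative sketch only.  Assumes DEFINE_IFUNC() from
     * <machine/ifunc.h> (newly usable on powerpc with this change) and a
     * placeholder CPU-feature predicate.
     */
    #include <sys/param.h>
    #include <sys/systm.h>

    #include <machine/cpu.h>
    #include <machine/ifunc.h>

    static void
    zero_range_scalar(void *buf, size_t len)
    {
        bzero(buf, len);    /* baseline implementation */
    }

    static void
    zero_range_vsx(void *buf, size_t len)
    {
        bzero(buf, len);    /* stand-in for a VSX-tuned variant */
    }

    /*
     * The resolver runs once, early in boot, when the R_PPC_IRELATIVE /
     * R_PPC64_IRELATIVE relocation for this symbol is applied (see the
     * elf_reloc_internal() and link_elf_ireloc() hunks below); later
     * calls to zero_range() go straight to the selected implementation.
     */
    DEFINE_IFUNC(, void, zero_range, (void *buf, size_t len))
    {
        return ((cpu_features & PPC_FEATURE_HAS_VSX) ?
            zero_range_vsx : zero_range_scalar);
    }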
This commit is contained in:

    parent f8519228d1
    commit 9411e24df3

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=360794
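Before the hunks themselves, a brief reading aid for the new R_PPC_IRELATIVE handling, paraphrased from the elf32/elf64 machdep changes below with simplified names (this is not the committed code verbatim): the relocation target encodes the resolver's address, the resolver is invoked once during early relocation, and its return value becomes the final function address.

    /*
     * Reading aid only -- paraphrased from the R_PPC_IRELATIVE case added
     * to elf_reloc_internal() below; names are simplified.
     */
    #include <sys/param.h>
    #include <machine/elf.h>

    static void
    apply_irelative(Elf_Addr *where, Elf_Addr relocbase, Elf_Addr addend)
    {
        Elf_Addr resolver, val;

        resolver = relocbase + addend;          /* resolver's address */
        val = ((Elf_Addr (*)(void))resolver)(); /* run it once at boot */
        if (*where != val)
            *where = val;                       /* bind the chosen target */
    }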
@@ -161,9 +161,9 @@ LDFLAGS+= --build-id=sha1
 .endif
 
 .if (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \
-    ${MACHINE_CPUARCH} == "i386") && \
+    ${MACHINE_CPUARCH} == "i386" || ${MACHINE} == "powerpc") && \
     defined(LINKER_FEATURES) && ${LINKER_FEATURES:Mifunc} == ""
-.error amd64/arm64/i386 kernel requires linker ifunc support
+.error amd64/arm64/i386/ppc* kernel requires linker ifunc support
 .endif
 .if ${MACHINE_CPUARCH} == "amd64"
 LDFLAGS+= -z max-page-size=2097152
@@ -6,6 +6,11 @@ OUTPUT_ARCH(powerpc)
 ENTRY(__start)
 SEARCH_DIR(/usr/lib);
 PROVIDE (__stack = 0);
+PHDRS
+{
+  kernel PT_LOAD;
+  dynamic PT_DYNAMIC;
+}
 SECTIONS
 {
   /* Read-only sections, merged into text segment: */
@@ -21,7 +26,7 @@ SECTIONS
     /* .gnu.warning sections are handled specially by elf32.em. */
     *(.gnu.warning)
     *(.gnu.linkonce.t*)
-  } =0
+  } :kernel =0
   _etext = .;
   PROVIDE (etext = .);
 
@@ -77,7 +82,7 @@ SECTIONS
   .got.plt : { *(.got.plt) }
 
 
-  .dynamic : { *(.dynamic) }
+  .dynamic : { *(.dynamic) } :kernel :dynamic
   /* Put .ctors and .dtors next to the .got2 section, so that the pointers
      get relocated with -mrelocatable. Also put in the .fixup pointers.
      The current compiler no longer needs this, but keep it around for 2.7.2 */
@@ -96,7 +101,7 @@ SECTIONS
   /* We want the small data sections together, so single-instruction offsets
      can access them all, and initialized data all before uninitialized, so
      we can shorten the on-disk segment size. */
-  .sdata : { *(.sdata) }
+  .sdata : { *(.sdata) } :kernel
   _edata = .;
   PROVIDE (edata = .);
   .sbss :
@@ -8,15 +8,15 @@ SEARCH_DIR(/usr/lib);
 PROVIDE (__stack = 0);
 PHDRS
 {
-  text PT_LOAD ;
-  dynamic PT_DYNAMIC ;
+  kernel PT_LOAD;
+  dynamic PT_DYNAMIC;
 }
 SECTIONS
 {
 
   /* Low-address wrapper for bootloaders (kexec/kboot) that can't parse ELF */
   . = kernbase - 0x100;
-  .kboot : { *(.text.kboot) } :text
+  .kboot : { *(.text.kboot) } :kernel
 
   /* Read-only sections, merged into text segment: */
   . = kernbase;
@@ -106,7 +106,7 @@ SECTIONS
   .got : ALIGN(8) { __tocbase = .; *(.got) }
   .toc : ALIGN(8) { *(.toc) }
 
-  .dynamic : { *(.dynamic) } :text :dynamic
+  .dynamic : { *(.dynamic) } :kernel :dynamic
   /* Put .ctors and .dtors next to the .got2 section, so that the pointers
      get relocated with -mrelocatable. Also put in the .fixup pointers.
      The current compiler no longer needs this, but keep it around for 2.7.2 */
@@ -125,7 +125,7 @@ SECTIONS
   /* We want the small data sections together, so single-instruction offsets
      can access them all, and initialized data all before uninitialized, so
      we can shorten the on-disk segment size. */
-  .sdata : { *(.sdata) }
+  .sdata : { *(.sdata) } :kernel
   _edata = .;
   PROVIDE (edata = .);
   .sbss :
@@ -6,6 +6,11 @@ OUTPUT_ARCH(powerpc)
 ENTRY(__start)
 SEARCH_DIR(/usr/lib);
 PROVIDE (__stack = 0);
+PHDRS
+{
+  kernel PT_LOAD;
+  dynamic PT_DYNAMIC;
+}
 SECTIONS
 {
   /* Read-only sections, merged into text segment: */
@@ -21,7 +26,7 @@ SECTIONS
     /* .gnu.warning sections are handled specially by elf32.em. */
     *(.gnu.warning)
     *(.gnu.linkonce.t*)
-  } =0
+  } :kernel =0
   _etext = .;
   PROVIDE (etext = .);
 
@@ -78,7 +83,7 @@ SECTIONS
   .got.plt : { *(.got.plt) }
 
 
-  .dynamic : { *(.dynamic) }
+  .dynamic : { *(.dynamic) } :kernel :dynamic
   /* Put .ctors and .dtors next to the .got2 section, so that the pointers
      get relocated with -mrelocatable. Also put in the .fixup pointers.
      The current compiler no longer needs this, but keep it around for 2.7.2 */
@@ -97,7 +102,7 @@ SECTIONS
   /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size. */
-  .sdata : { *(.sdata) }
+  .sdata : { *(.sdata) } :kernel
   _edata = .;
   PROVIDE (edata = .);
   .sbss :
@@ -388,7 +388,9 @@ link_elf_link_common_finish(linker_file_t lf)
     return (0);
 }
 
+#ifdef RELOCATABLE_KERNEL
 extern vm_offset_t __startkernel, __endkernel;
+#endif
 
 static unsigned long kern_relbase = KERNBASE;
 
@@ -424,7 +426,7 @@ link_elf_init(void* arg)
 
         ef = (elf_file_t) linker_kernel_file;
         ef->preloaded = 1;
-#ifdef __powerpc__
+#ifdef RELOCATABLE_KERNEL
         ef->address = (caddr_t) (__startkernel - KERNBASE);
 #else
         ef->address = 0;
@@ -436,7 +438,7 @@ link_elf_init(void* arg)
 
         if (dp != NULL)
             parse_dynamic(ef);
-#ifdef __powerpc__
+#ifdef RELOCATABLE_KERNEL
         linker_kernel_file->address = (caddr_t)__startkernel;
         linker_kernel_file->size = (intptr_t)(__endkernel - __startkernel);
         kern_relbase = (unsigned long)__startkernel;
@@ -1860,7 +1862,7 @@ link_elf_strtab_get(linker_file_t lf, caddr_t *strtab)
     return (ef->ddbstrcnt);
 }
 
-#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
+#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) || defined(__powerpc__)
 /*
  * Use this lookup routine when performing relocations early during boot.
  * The generic lookup routine depends on kobj, which is not initialized
@@ -1896,8 +1898,14 @@ link_elf_ireloc(caddr_t kmdp)
 
     ef->modptr = kmdp;
     ef->dynamic = (Elf_Dyn *)&_DYNAMIC;
-    parse_dynamic(ef);
+
+#ifdef RELOCATABLE_KERNEL
+    ef->address = (caddr_t) (__startkernel - KERNBASE);
+#else
+    ef->address = 0;
+#endif
+    parse_dynamic(ef);
 
     link_elf_preload_parse_symbols(ef);
     relocate_file1(ef, elf_lookup_ifunc, elf_reloc, true);
 }
@@ -161,6 +161,7 @@ extern void *dsmisstrap, *dsmisssize;
 
 extern void *ap_pcpu;
 extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);
+extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);
 
 void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
     void *mdp, uint32_t mdp_cookie);
@@ -184,13 +185,22 @@ aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
 
 #ifdef __powerpc64__
     /*
-     * If in real mode, relocate to high memory so that the kernel
+     * Relocate to high memory so that the kernel
      * can execute from the direct map.
+     *
+     * If we are in virtual mode already, use a special entry point
+     * that sets up a temporary DMAP to execute from until we can
+     * properly set up the MMU.
      */
-    if (!(mfmsr() & PSL_DR) &&
-        (vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS)
-        __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
-            DMAP_BASE_ADDRESS, mfmsr());
+    if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) {
+        if (mfmsr() & PSL_DR) {
+            __restartkernel_virtual(fdt, 0, ofentry, mdp,
+                mdp_cookie, DMAP_BASE_ADDRESS, mfmsr());
+        } else {
+            __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
+                DMAP_BASE_ADDRESS, mfmsr());
+        }
+    }
 #endif
 
     /* Various very early CPU fix ups */
@@ -200,6 +200,57 @@ ASENTRY_NOPROF(__start)
     /* Unreachable */
     b .
 
+ASENTRY_NOPROF(__restartkernel_virtual)
+    /*
+     * When coming in via this entry point, we need to alter the SLB to
+     * shadow the segment register emulation entries in DMAP space.
+     * We need to do this dance because we are running with virtual-mode
+     * OpenFirmware and have not yet taken over the MMU.
+     *
+     * Assumptions:
+     * 1) The kernel is currently identity-mapped.
+     * 2) We are currently executing at an address compatible with
+     *    real mode.
+     * 3) The first 16 SLB entries are emulating SRs.
+     * 4) The rest of the SLB is not in use.
+     * 5) OpenFirmware is not manipulating the SLB at runtime.
+     * 6) We are running on 64-bit AIM.
+     *
+     * Tested on a G5.
+     */
+    mfmsr   %r14
+    /* Switch to real mode because we are about to mess with the SLB. */
+    andi.   %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
+    mtmsr   %r14
+    isync
+    /* Prepare variables for later use. */
+    li      %r14, 0
+    li      %r18, 0
+    oris    %r18, %r18, 0xc000
+    sldi    %r18, %r18, 32      /* r18: 0xc000000000000000 */
+1:
+    /*
+     * Loop over the first 16 SLB entries.
+     * Offset the SLBE into the DMAP, add 16 to the index, and write
+     * it back to the SLB.
+     */
+    /* XXX add more safety checks */
+    slbmfev %r15, %r14
+    slbmfee %r16, %r14
+    or      %r16, %r16, %r14    /* index is 0-15 */
+    ori     %r16, %r16, 0x10    /* add 16 to index. */
+    or      %r16, %r16, %r18    /* SLBE DMAP offset */
+    rldicr  %r17, %r16, 0, 37   /* Invalidation SLBE */
+
+    isync
+    slbie   %r17
+    /* isync */
+    slbmte  %r15, %r16
+    isync
+    addi    %r14, %r14, 1
+    cmpdi   %r14, 16
+    blt     1b
+
 ASENTRY_NOPROF(__restartkernel)
     /*
      * r3-r7: arguments to go to __start
@@ -85,8 +85,4 @@ typedef intptr_t db_expr_t; /* expression - signed */
 #define inst_load(ins) 0
 #define inst_store(ins) 0
 
-#ifdef __powerpc64__
-#define DB_STOFFS(offs) ((offs) & ~DMAP_BASE_ADDRESS)
-#endif
-
 #endif /* _POWERPC_DB_MACHDEP_H_ */
@@ -109,6 +109,8 @@
 
 #define MAXPAGESIZES 1 /* maximum number of supported page sizes */
 
+#define RELOCATABLE_KERNEL 1 /* kernel may relocate during startup */
+
 #ifndef KSTACK_PAGES
 #ifdef __powerpc64__
 #define KSTACK_PAGES 8 /* includes pcb */
@@ -36,6 +36,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/pmap.h>
 
 #include <machine/bus.h>
+#include <machine/elf.h>
+#include <machine/param.h>
 
 #include <dev/ofw/openfirm.h>
 #include <dev/ofw/ofw_bus.h>
@@ -58,6 +60,8 @@ ofw_initrd_probe_and_attach(void *junk)
     vm_paddr_t start, end;
     pcell_t cell[2];
     ssize_t size;
+    u_char *taste;
+    Elf_Ehdr ehdr;
 
     if (!hw_direct_map)
         return;
@@ -91,7 +95,15 @@ ofw_initrd_probe_and_attach(void *junk)
     }
 
     if (end - start > 0) {
-        mfs_root = (u_char *) PHYS_TO_DMAP(start);
+        taste = (u_char*) PHYS_TO_DMAP(start);
+        memcpy(&ehdr, taste, sizeof(ehdr));
+
+        if (IS_ELF(ehdr)) {
+            printf("ofw_initrd: initrd is kernel image!\n");
+            return;
+        }
+
+        mfs_root = taste;
         mfs_root_size = end - start;
         printf("ofw_initrd: initrd loaded at 0x%08lx-0x%08lx\n",
             start, end);
@@ -221,10 +221,10 @@ elf32_dump_thread(struct thread *td, void *dst, size_t *off)
 
 #ifndef __powerpc64__
 bool
-elf_is_ifunc_reloc(Elf_Size r_info __unused)
+elf_is_ifunc_reloc(Elf_Size r_info)
 {
 
-    return (false);
+    return (ELF_R_TYPE(r_info) == R_PPC_IRELATIVE);
 }
 
 /* Process one elf relocation with addend. */
@@ -235,7 +235,7 @@ elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
     Elf_Addr *where;
     Elf_Half *hwhere;
     Elf_Addr addr;
-    Elf_Addr addend;
+    Elf_Addr addend, val;
     Elf_Word rtype, symidx;
     const Elf_Rela *rela;
     int error;
@@ -317,6 +317,13 @@ elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
         *where = elf_relocaddr(lf, addr + addend);
         break;
 
+    case R_PPC_IRELATIVE:
+        addr = relocbase + addend;
+        val = ((Elf32_Addr (*)(void))addr)();
+        if (*where != val)
+            *where = val;
+        break;
+
     default:
         printf("kldload: unexpected relocation type %d\n",
             (int) rtype);
@@ -282,10 +282,10 @@ elf64_dump_thread(struct thread *td, void *dst, size_t *off)
 }
 
 bool
-elf_is_ifunc_reloc(Elf_Size r_info __unused)
+elf_is_ifunc_reloc(Elf_Size r_info)
 {
 
-    return (false);
+    return (ELF_R_TYPE(r_info) == R_PPC_IRELATIVE);
 }
 
 /* Process one elf relocation with addend. */
@@ -295,7 +295,7 @@ elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
 {
     Elf_Addr *where;
     Elf_Addr addr;
-    Elf_Addr addend;
+    Elf_Addr addend, val;
     Elf_Word rtype, symidx;
     const Elf_Rela *rela;
     int error;
@@ -342,6 +342,13 @@ elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
         __asm __volatile("dcbst 0,%0; sync" :: "r"(where) : "memory");
         break;
 
+    case R_PPC_IRELATIVE:
+        addr = relocbase + addend;
+        val = ((Elf64_Addr (*)(void))addr)();
+        if (*where != val)
+            *where = val;
+        break;
+
     default:
         printf("kldload: unexpected relocation type %d\n",
             (int) rtype);
@@ -113,6 +113,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/elf.h>
 #include <machine/fpu.h>
 #include <machine/hid.h>
+#include <machine/ifunc.h>
 #include <machine/kdb.h>
 #include <machine/md_var.h>
 #include <machine/metadata.h>
@@ -161,6 +162,8 @@ SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
 uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
     uint32_t);
 
+static void fake_preload_metadata(void);
+
 long Maxmem = 0;
 long realmem = 0;
 
@@ -246,6 +249,11 @@ void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
 void aim_cpu_init(vm_offset_t toc);
 void booke_cpu_init(void);
 
+#ifdef DDB
+static void load_external_symtab(void);
+static void displace_symbol_table(vm_offset_t, vm_offset_t, vm_offset_t);
+#endif
+
 uintptr_t
 powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
     uint32_t mdp_cookie)
@@ -254,10 +262,13 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
     struct cpuref bsp;
     vm_offset_t startkernel, endkernel;
     char *env;
+    void *kmdp = NULL;
     bool ofw_bootargs = false;
+    bool symbols_provided = false;
 #ifdef DDB
     vm_offset_t ksym_start;
     vm_offset_t ksym_end;
+    vm_offset_t ksym_sz;
 #endif
 
     /* First guess at start/end kernel positions */
@@ -286,16 +297,30 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
     aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
 #endif
 
+    /*
+     * At this point, we are executing in our correct memory space.
+     * Book-E started there, and AIM has done an rfi and restarted
+     * execution from _start.
+     *
+     * We may still be in real mode, however. If we are running out of
+     * the direct map on 64 bit, this is possible to do.
+     */
+
     /*
      * Parse metadata if present and fetch parameters. Must be done
      * before console is inited so cninit gets the right value of
      * boothowto.
      */
     if (mdp != NULL) {
-        void *kmdp = NULL;
+        /*
+         * Starting up from loader.
+         *
+         * Full metadata has been provided, but we need to figure
+         * out the correct address to relocate it to.
+         */
         char *envp = NULL;
         uintptr_t md_offset = 0;
-        vm_paddr_t kernelendphys;
+        vm_paddr_t kernelstartphys, kernelendphys;
 
 #ifdef AIM
         if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
@@ -306,6 +331,7 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
 
         preload_metadata = mdp;
         if (md_offset > 0) {
+            /* Translate phys offset into DMAP offset. */
             preload_metadata += md_offset;
             preload_bootstrap_relocate(md_offset);
         }
@@ -321,6 +347,9 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
             if (fdt != 0)
                 fdt += md_offset;
         }
+        kernelstartphys = MD_FETCH(kmdp, MODINFO_ADDR,
+            vm_offset_t);
+        /* kernelstartphys is already relocated. */
         kernelendphys = MD_FETCH(kmdp, MODINFOMD_KERNEND,
             vm_offset_t);
         if (kernelendphys != 0)
@@ -329,13 +358,35 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
 #ifdef DDB
             ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
             ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
+            ksym_sz = *(Elf_Size*)ksym_start;
+
+            /*
+             * Loader already handled displacing to the load
+             * address, but we still need to displace it to the
+             * DMAP.
+             */
+            displace_symbol_table(
+                (vm_offset_t)(ksym_start + sizeof(Elf_Size)),
+                ksym_sz, md_offset);
+
             db_fetch_ksymtab(ksym_start, ksym_end);
+            symbols_provided = true;
 #endif
         }
     } else {
+        /*
+         * Self-loading kernel, we have to fake up metadata.
+         *
+         * Since we are creating the metadata from the final
+         * memory space, we don't need to call
+         * preload_boostrap_relocate().
+         */
+        fake_preload_metadata();
+        kmdp = preload_search_by_type("elf kernel");
         init_static_kenv(init_kenv, sizeof(init_kenv));
         ofw_bootargs = true;
     }
 
     /* Store boot environment state */
     OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);
@@ -365,6 +416,11 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
      */
     OF_bootstrap();
 
+#ifdef DDB
+    if (!symbols_provided && hw_direct_map)
+        load_external_symtab();
+#endif
+
     if (ofw_bootargs)
         ofw_parse_bootargs();
 
@@ -412,6 +468,7 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
      */
     pmap_bootstrap(startkernel, endkernel);
     mtmsr(psl_kernset & ~PSL_EE);
+    link_elf_ireloc(kmdp);
 
     /*
      * Initialize params/tunables that are derived from memsize
@@ -449,6 +506,178 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
         (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
 }
 
+#ifdef DDB
+/*
+ * XXX Figure out where to move this.
+ */
+static void
+displace_symbol_table(vm_offset_t ksym_start,
+    vm_offset_t ksym_sz, vm_offset_t displacement) {
+    Elf_Sym *sym;
+
+    /*
+     * Relocate the symbol table to our final load address.
+     */
+    for (sym = (Elf_Sym *)ksym_start;
+        (vm_paddr_t)sym < (ksym_start + ksym_sz);
+        sym++) {
+        if (sym->st_name == 0 ||
+            sym->st_shndx == SHN_UNDEF ||
+            sym->st_value == 0)
+            continue;
+        if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT &&
+            ELF_ST_TYPE(sym->st_info) != STT_FUNC &&
+            ELF_ST_TYPE(sym->st_info) != STT_NOTYPE)
+            continue;
+        /* Skip relocating any implausible symbols */
+        if (sym->st_value > KERNBASE)
+            sym->st_value += displacement;
+    }
+}
+
+/*
+ * On powernv, we might not have symbols loaded via loader. However, if the
+ * user passed the kernel in as the initrd as well, we can manually load it
+ * via reinterpreting the initrd copy of the kernel.
+ */
+static void
+load_external_symtab(void) {
+    phandle_t chosen;
+    vm_paddr_t start, end;
+    pcell_t cell[2];
+    ssize_t size;
+    u_char *kernelimg;
+
+    int i;
+
+    Elf_Ehdr *ehdr;
+    Elf_Phdr *phdr;
+    Elf_Shdr *shdr;
+
+    vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz;
+
+    if (!hw_direct_map)
+        return;
+
+    chosen = OF_finddevice("/chosen");
+    if (chosen <= 0)
+        return;
+
+    if (!OF_hasprop(chosen, "linux,initrd-start") ||
+        !OF_hasprop(chosen, "linux,initrd-end"))
+        return;
+
+    size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell));
+    if (size == 4)
+        start = cell[0];
+    else if (size == 8)
+        start = (uint64_t)cell[0] << 32 | cell[1];
+    else
+        return;
+
+    size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell));
+    if (size == 4)
+        end = cell[0];
+    else if (size == 8)
+        end = (uint64_t)cell[0] << 32 | cell[1];
+    else
+        return;
+
+    if (!(end - start > 0))
+        return;
+
+    kernelimg = (u_char *) PHYS_TO_DMAP(start);
+
+    ehdr = (Elf_Ehdr *)kernelimg;
+
+    if (!IS_ELF(*ehdr))
+        return;
+
+    phdr = (Elf_Phdr *)(kernelimg + ehdr->e_phoff);
+    shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff);
+
+    ksym_start = 0;
+    ksym_sz = 0;
+    kstr_start = 0;
+    kstr_sz = 0;
+    for (i = 0; i < ehdr->e_shnum; i++) {
+        if (shdr[i].sh_type == SHT_SYMTAB) {
+            ksym_start = (vm_offset_t)(kernelimg +
+                shdr[i].sh_offset);
+            ksym_sz = (vm_offset_t)(shdr[i].sh_size);
+            kstr_start = (vm_offset_t)(kernelimg +
+                shdr[shdr[i].sh_link].sh_offset);
+            kstr_sz = (vm_offset_t)
+                (shdr[shdr[i].sh_link].sh_size);
+        }
+    }
+
+    if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 &&
+        kstr_sz != 0 && ksym_start < kstr_start) {
+
+        displace_symbol_table(ksym_start, ksym_sz,
+            (__startkernel - KERNBASE));
+        ksymtab = ksym_start;
+        ksymtab_size = ksym_sz;
+        kstrtab = kstr_start;
+    }
+
+};
+#endif
+
+/*
+ * When not being loaded from loader, we need to create our own metadata
+ * so we can interact with the kernel linker.
+ */
+static void
+fake_preload_metadata(void) {
+    /* We depend on dword alignment here. */
+    static uint32_t fake_preload[36] __aligned(8);
+    int i = 0;
+
+    fake_preload[i++] = MODINFO_NAME;
+    fake_preload[i++] = strlen("kernel") + 1;
+    strcpy((char*)&fake_preload[i], "kernel");
+    /* ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
+    i += 2;
+
+    fake_preload[i++] = MODINFO_TYPE;
+    fake_preload[i++] = strlen("elf kernel") + 1;
+    strcpy((char*)&fake_preload[i], "elf kernel");
+    /* ['e' 'l' 'f' ' '] ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
+    i += 3;
+
+#ifdef __powerpc64__
+    /* Padding -- Fields start on u_long boundaries */
+    fake_preload[i++] = 0;
+#endif
+
+    fake_preload[i++] = MODINFO_ADDR;
+    fake_preload[i++] = sizeof(vm_offset_t);
+    *(vm_offset_t *)&fake_preload[i] =
+        (vm_offset_t)(__startkernel);
+    i += (sizeof(vm_offset_t) / 4);
+
+    fake_preload[i++] = MODINFO_SIZE;
+    fake_preload[i++] = sizeof(vm_offset_t);
+    *(vm_offset_t *)&fake_preload[i] =
+        (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel);
+    i += (sizeof(vm_offset_t) / 4);
+
+    /*
+     * MODINFOMD_SSYM and MODINFOMD_ESYM cannot be provided here,
+     * as the memory comes from outside the loaded ELF sections.
+     *
+     * If the symbols are being provided by other means (MFS), the
+     * tables will be loaded into the debugger directly.
+     */
+
+    /* Null field at end to mark end of data. */
+    fake_preload[i++] = 0;
+    fake_preload[i] = 0;
+    preload_metadata = (void*)fake_preload;
+}
+
 /*
  * Flush the D-cache for non-DMA I/O so that the I-cache can
  * be made coherent later.