mirror of https://git.FreeBSD.org/src.git
Compute the number of initial kernel page table pages (NKPT) dynamically.

This eliminates the need to recompile the kernel when the default value of
NKPT is not big enough, for example when loading large kernel modules or
memory disk images from the loader.

If NKPT is defined in the kernel configuration file then it overrides the
dynamic calculation.

Reviewed by:	alc, kib
commit 6d62a48f47
parent 0a8038e3d7

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=246384
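As a rough illustration of the dynamic calculation described above (implemented by nkpt_init() in the pmap.c hunk below), the same arithmetic can be replayed in a small standalone C program. The 100 MB bootstrap footprint used here is an assumed example value, not taken from a real boot; PDRSHIFT and NPDEPG carry their usual amd64 values.

/*
 * Standalone replay of the nkpt_init() arithmetic from the pmap.c hunk
 * below.  The "addr" value (end of the early bootstrap allocations) is an
 * assumed example; in the kernel it is *firstaddr at the time
 * create_pagetables() calls nkpt_init().
 */
#include <stdio.h>
#include <stdint.h>

#define	PDRSHIFT	21		/* one kernel PT page maps 2 MB (NBPDR) */
#define	NPDEPG		512		/* page directory entries per page */
#define	howmany(x, y)	(((x) + (y) - 1) / (y))
#define	NKPDPE(ptpgs)	howmany((ptpgs), NPDEPG)	/* kernel PDP slots */

int
main(void)
{
	uint64_t addr = 100ULL << 20;	/* assumed: bootstrap allocations end at 100 MB */
	int pt_pages;

	pt_pages = (int)howmany(addr, 1ULL << PDRSHIFT);	/* 2 MB chunks to cover addr */
	pt_pages += NKPDPE(pt_pages);	/* kernel PDP slots implied by those PT pages */
	pt_pages += 8;			/* 16 MB of slop for kernel modules */

	/* With the values above: 50 + 1 + 8 = 59 pages, i.e. 118 MB of KVA. */
	printf("nkpt = %d (maps %d MB of initial kernel VA)\n",
	    pt_pages, pt_pages * 2);
	return (0);
}

With those numbers the sketch prints nkpt = 59, covering 118 MB of initial kernel VA; the old static default of NKPT = 32 (removed from pmap.h below) covered only 64 MB.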
--- a/sys/amd64/amd64/minidump_machdep.c
+++ b/sys/amd64/amd64/minidump_machdep.c
@@ -232,7 +232,7 @@ minidumpsys(struct dumperinfo *di)
 	/* Walk page table pages, set bits in vm_page_dump */
 	pmapsize = 0;
 	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
-	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
+	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
 	    kernel_vm_end); ) {
 		/*
 		 * We always write a page, even if it is zero. Each
@@ -364,7 +364,7 @@ minidumpsys(struct dumperinfo *di)
 	/* Dump kernel page directory pages */
 	bzero(fakepd, sizeof(fakepd));
 	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
-	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
+	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
 	    kernel_vm_end); va += NBPDP) {
 		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -202,6 +202,10 @@ struct pmap kernel_pmap_store;
 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 
+int nkpt;
+SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
+    "Number of kernel page table pages allocated on bootup");
+
 static int ndmpdp;
 static vm_paddr_t dmaplimit;
 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
@@ -495,17 +499,42 @@ allocpages(vm_paddr_t *firstaddr, int n)
 
 CTASSERT(powerof2(NDMPML4E));
 
+/* number of kernel PDP slots */
+#define	NKPDPE(ptpgs)	howmany((ptpgs), NPDEPG)
+
+static void
+nkpt_init(vm_paddr_t addr)
+{
+	int pt_pages;
+
+#ifdef NKPT
+	pt_pages = NKPT;
+#else
+	pt_pages = howmany(addr, 1 << PDRSHIFT);
+	pt_pages += NKPDPE(pt_pages);
+
+	/*
+	 * Add some slop beyond the bare minimum required for bootstrapping
+	 * the kernel.
+	 *
+	 * This is quite important when allocating KVA for kernel modules.
+	 * The modules are required to be linked in the negative 2GB of
+	 * the address space.  If we run out of KVA in this region then
+	 * pmap_growkernel() will need to allocate page table pages to map
+	 * the entire 512GB of KVA space which is an unnecessary tax on
+	 * physical memory.
+	 */
+	pt_pages += 8;		/* 16MB additional slop for kernel modules */
+#endif
+	nkpt = pt_pages;
+}
+
 static void
 create_pagetables(vm_paddr_t *firstaddr)
 {
-	int i, j, ndm1g;
-
-	/* Allocate pages */
-	KPTphys = allocpages(firstaddr, NKPT);
-	KPML4phys = allocpages(firstaddr, 1);
-	KPDPphys = allocpages(firstaddr, NKPML4E);
-	KPDphys = allocpages(firstaddr, NKPDPE);
+	int i, j, ndm1g, nkpdpe;
 
+	/* Allocate page table pages for the direct map */
 	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
 	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
 		ndmpdp = 4;
@@ -517,6 +546,22 @@ create_pagetables(vm_paddr_t *firstaddr)
 		DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
 	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
 
+	/* Allocate pages */
+	KPML4phys = allocpages(firstaddr, 1);
+	KPDPphys = allocpages(firstaddr, NKPML4E);
+
+	/*
+	 * Allocate the initial number of kernel page table pages required to
+	 * bootstrap.  We defer this until after all memory-size dependent
+	 * allocations are done (e.g. direct map), so that we don't have to
+	 * build in too much slop in our estimate.
+	 */
+	nkpt_init(*firstaddr);
+	nkpdpe = NKPDPE(nkpt);
+
+	KPTphys = allocpages(firstaddr, nkpt);
+	KPDphys = allocpages(firstaddr, nkpdpe);
+
 	/* Fill in the underlying page table pages */
 	/* Read-only from zero to physfree */
 	/* XXX not fully used, underneath 2M pages */
@@ -526,7 +571,7 @@ create_pagetables(vm_paddr_t *firstaddr)
 	}
 
 	/* Now map the page tables at their location within PTmap */
-	for (i = 0; i < NKPT; i++) {
+	for (i = 0; i < nkpt; i++) {
 		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
 		((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
 	}
@@ -539,7 +584,7 @@ create_pagetables(vm_paddr_t *firstaddr)
 	}
 
 	/* And connect up the PD to the PDP */
-	for (i = 0; i < NKPDPE; i++) {
+	for (i = 0; i < nkpdpe; i++) {
 		((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
 		    (i << PAGE_SHIFT);
 		((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
@@ -768,7 +813,7 @@ pmap_init(void)
 	 * Initialize the vm page array entries for the kernel pmap's
 	 * page table pages.
 	 */
-	for (i = 0; i < NKPT; i++) {
+	for (i = 0; i < nkpt; i++) {
 		mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
 		KASSERT(mpte >= vm_page_array &&
 		    mpte < &vm_page_array[vm_page_array_size],
@@ -1995,7 +2040,7 @@ pmap_growkernel(vm_offset_t addr)
 	 * any new kernel page table pages between "kernel_vm_end" and
 	 * "KERNBASE".
 	 */
-	if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
+	if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
 		return;
 
 	addr = roundup2(addr, NBPDR);
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -113,13 +113,7 @@
 				((unsigned long)(l2) << PDRSHIFT) | \
 				((unsigned long)(l1) << PAGE_SHIFT))
 
-/* Initial number of kernel page tables. */
-#ifndef NKPT
-#define	NKPT		32
-#endif
-
 #define	NKPML4E		1		/* number of kernel PML4 slots */
-#define	NKPDPE		howmany(NKPT, NPDEPG)/* number of kernel PDP slots */
 
 #define	NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
 #define	NUPDPE		(NUPML4E*NPDPEPG)/* number of userland PDP pages */
@@ -181,6 +175,7 @@ typedef u_int64_t pml4_entry_t;
 #define	PML4map		((pd_entry_t *)(addr_PML4map))
 #define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))
 
+extern	int nkpt;		/* Initial number of kernel page tables */
 extern	u_int64_t KPDPphys;	/* physical address of kernel level 3 */
 extern	u_int64_t KPML4phys;	/* physical address of kernel level 4 */
 
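The pmap.c change also exports the computed value as a read-only sysctl, machdep.nkpt, via the SYSCTL_INT(_machdep, OID_AUTO, nkpt, ...) declaration above. A minimal userland sketch for reading it, illustrative only and not part of the commit:

/*
 * Read machdep.nkpt (number of kernel page table pages allocated on
 * bootup) and report the initial kernel VA it covers, 2 MB per page.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int nkpt;
	size_t len = sizeof(nkpt);

	if (sysctlbyname("machdep.nkpt", &nkpt, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(machdep.nkpt)");
	printf("machdep.nkpt: %d page table pages (%d MB of initial kernel VA)\n",
	    nkpt, nkpt * 2);
	return (0);
}

From the shell, "sysctl machdep.nkpt" reports the same value.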