Implement pv_bit_count which is used by pmap_ts_referenced.
Remove the modified tte bit and add a softwrite bit.  Mappings are only
writeable if they have been written to, thus in general modify just
duplicates the write bit.  The softwrite bit makes it easier to distinguish
mappings which should be writeable but are not yet modified.

Move the exec bit down one, it was being sign extended when used as an
immediate operand.

Use the lock bit to mean tsb page and remove the tsb bit.  These are the
only form of locked (tsb) entries we support and we need to conserve bits
where possible.

Implement pmap_copy_page and pmap_is_modified and friends.

Detect mappings that are being upgraded from read-only to read-write due
to copy-on-write and update the write bit appropriately.

Make trap_mmu_fault do the right thing for protection faults, which is
necessary to implement copy on write correctly.  Also handle a bunch more
userland trap types and add ktr traces.
This commit is contained in:
parent 9a0f54a4de
commit ccc64d13f3
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=82903
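
Annotation, not part of the commit: a condensed sketch of the write-tracking
scheme the message describes, using the TD_* names from the diff below and
assuming the kernel headers it modifies. The helper function is hypothetical,
for illustration only; the commit sets these bits inline in pmap_enter().

/*
 * TD_SW ("soft write") marks a mapping that is allowed to be written;
 * the hardware write bit TD_W is only turned on once a write actually
 * happens, so TD_W doubles as the modified bit and a separate TD_MOD
 * is no longer needed.  tte_write_bits() is a hypothetical helper.
 */
static u_long
tte_write_bits(vm_prot_t prot, boolean_t wired)
{
	u_long data;

	data = 0;
	if (prot & VM_PROT_WRITE) {
		data |= TD_SW;		/* writeable, but not yet written */
		if (wired)
			data |= TD_W;	/* pre-dirty wired mappings */
	}
	return (data);
}

On a fast data access protection trap (T_DMMU_PROT), trap_mmu_fault() below
calls vm_fault() with VM_PROT_WRITE and VM_FAULT_DIRTY, which resolves any
copy-on-write and is the point where TD_W finally gets set; pmap_page_protect()
clearing TD_W | TD_SW is the reverse transition.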
@@ -188,6 +188,7 @@ void pv_insert(pmap_t pm, vm_offset_t pa, vm_offset_t va, struct stte *stp);
 void pv_remove_virt(struct stte *stp);
 
 void pv_bit_clear(vm_page_t m, u_long bits);
+int pv_bit_count(vm_page_t m, u_long bits);
 void pv_bit_set(vm_page_t m, u_long bits);
 int pv_bit_test(vm_page_t m, u_long bits);
@@ -72,12 +72,11 @@
 #define	TD_VA_LOW_SHIFT	TD_SOFT2_SHIFT
 #define	TD_VA_LOW_MASK	TD_SOFT2_MASK
 
-#define	TS_EXEC		(1L << 5)
-#define	TS_INIT		(1L << 4)
-#define	TS_MNG		(1L << 3)
-#define	TS_MOD		(1L << 2)
+#define	TS_EXEC		(1L << 4)
+#define	TS_INIT		(1L << 3)
+#define	TS_MNG		(1L << 2)
 #define	TS_REF		(1L << 1)
-#define	TS_TSB		(1L << 0)
+#define	TS_W		(1L << 0)
 
 #define	TD_V		(1L << 63)
 #define	TD_8K		(0L << TD_SIZE_SHIFT)
@@ -92,9 +91,8 @@
 #define	TD_EXEC		(TS_EXEC << TD_SOFT_SHIFT)
 #define	TD_INIT		(TS_INIT << TD_SOFT_SHIFT)
 #define	TD_MNG		(TS_MNG << TD_SOFT_SHIFT)
-#define	TD_MOD		(TS_MOD << TD_SOFT_SHIFT)
 #define	TD_REF		(TS_REF << TD_SOFT_SHIFT)
-#define	TD_TSB		(TS_TSB << TD_SOFT_SHIFT)
+#define	TD_SW		(TS_W << TD_SOFT_SHIFT)
 #define	TD_L		(1L << 6)
 #define	TD_CP		(1L << 5)
 #define	TD_CV		(1L << 4)
@@ -229,7 +229,7 @@ pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva)
 		va = TSB_KERNEL_MIN_ADDRESS + i * PAGE_SIZE_4M;
 		tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
 		tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
-		    TD_MOD | TD_REF | TD_TSB | TD_L | TD_CP | TD_P | TD_W;
+		    TD_L | TD_CP | TD_P | TD_W;
 		tlb_store_slot(TLB_DTLB, va, TLB_CTX_KERNEL, tte,
 		    TLB_SLOT_TSB_KERNEL_MIN + i);
 	}
@@ -368,8 +368,10 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
 
 	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
 	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
-	    TD_MOD | TD_REF | TD_CP | TD_P | TD_W;
+	    TD_REF | TD_SW | TD_CP | TD_P | TD_W;
 	stp = tsb_kvtostte(va);
+	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx stp=%p data=%#lx",
+	    va, pa, stp, stp->st_tte.tte_data);
 	stp->st_tte = tte;
 }
 
@@ -382,6 +384,7 @@ pmap_kremove(vm_offset_t va)
 	struct stte *stp;
 
 	stp = tsb_kvtostte(va);
+	CTR2(KTR_PMAP, "pmap_kremove: va=%#lx stp=%p", va, stp);
 	tte_invalidate(&stp->st_tte);
 }
 
@@ -424,8 +427,13 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	struct stte *stp;
 	struct tte tte;
 	vm_offset_t pa;
+	u_long data;
 
+	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+	    ("pmap_enter: non current pmap"));
 	pa = VM_PAGE_TO_PHYS(m);
+	CTR5(KTR_PMAP, "pmap_enter: ctx=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+	    pm->pm_context, va, pa, prot, wired);
 	tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
 	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
 	    TD_CP | TD_CV;
@@ -434,10 +442,10 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (wired == TRUE) {
 		tte.tte_data |= TD_REF;
 		if (prot & VM_PROT_WRITE)
-			tte.tte_data |= TD_MOD;
+			tte.tte_data |= TD_W;
 	}
 	if (prot & VM_PROT_WRITE)
-		tte.tte_data |= TD_W;
+		tte.tte_data |= TD_SW;
 	if (prot & VM_PROT_EXECUTE) {
 		tte.tte_data |= TD_EXEC;
 		icache_global_flush(pa);
@@ -447,6 +455,14 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	PMAP_LOCK(pm);
 	if ((stp = tsb_stte_lookup(pm, va)) != NULL) {
+		data = stp->st_tte.tte_data;
+		if (TD_PA(data) == pa) {
+			if (prot & VM_PROT_WRITE)
+				tte.tte_data |= TD_W;
+			CTR3(KTR_PMAP,
+			    "pmap_enter: update pa=%#lx data=%#lx to %#lx",
+			    pa, data, tte.tte_data);
+		}
 		if (stp->st_tte.tte_data & TD_MNG)
 			pv_remove_virt(stp);
 		tsb_stte_remove(stp);
@@ -464,6 +480,8 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 {
 	struct stte *stp;
 
+	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+	    ("pmap_remove: non current pmap"));
 	PMAP_LOCK(pm);
 	for (; start < end; start += PAGE_SIZE) {
 		if ((stp = tsb_stte_lookup(pm, start)) == NULL)
@@ -532,23 +550,6 @@ pmap_growkernel(vm_offset_t addr)
 {
 }
 
-/*
- * Zero a page of physical memory by temporarily mapping it into the tlb.
- */
-void
-pmap_zero_page(vm_offset_t pa)
-{
-	struct tte tte;
-	vm_offset_t va;
-
-	va = CADDR2;
-	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
-	tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
-	tlb_store(TLB_DTLB, va, TLB_CTX_KERNEL, tte);
-	bzero((void *)va, PAGE_SIZE);
-	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
-}
-
 /*
  * Make the specified page pageable (or not). Unneeded.
  */
@@ -621,7 +622,7 @@ pmap_page_protect(vm_page_t m, vm_prot_t prot)
 	if (m->flags & PG_FICTITIOUS || prot & VM_PROT_WRITE)
 		return;
 	if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
-		pv_bit_clear(m, TD_W);
+		pv_bit_clear(m, TD_W | TD_SW);
 	else
 		pv_global_remove_all(m);
 }
@@ -632,7 +633,34 @@ pmap_clear_modify(vm_page_t m)
 
 	if (m->flags & PG_FICTITIOUS)
 		return;
-	pv_bit_clear(m, TD_MOD);
+	pv_bit_clear(m, TD_W);
+}
+
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+	if (m->flags & PG_FICTITIOUS)
+		return FALSE;
+	return (pv_bit_test(m, TD_W));
+}
+
+void
+pmap_clear_reference(vm_page_t m)
+{
+
+	if (m->flags & PG_FICTITIOUS)
+		return;
+	pv_bit_clear(m, TD_REF);
+}
+
+int
+pmap_ts_referenced(vm_page_t m)
+{
+
+	if (m->flags & PG_FICTITIOUS)
+		return (0);
+	return (pv_bit_count(m, TD_REF));
 }
 
 void
@@ -667,10 +695,43 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 	/* XXX */
 }
 
+/*
+ * Copy a page of physical memory by temporarily mapping it into the tlb.
+ */
 void
 pmap_copy_page(vm_offset_t src, vm_offset_t dst)
 {
-	TODO;
+	struct tte tte;
+
+	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR1);
+	tte.tte_data = TD_V | TD_8K | TD_PA(src) | TD_L | TD_CP | TD_P | TD_W;
+	tlb_store(TLB_DTLB, CADDR1, TLB_CTX_KERNEL, tte);
+
+	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(CADDR2);
+	tte.tte_data = TD_V | TD_8K | TD_PA(dst) | TD_L | TD_CP | TD_P | TD_W;
+	tlb_store(TLB_DTLB, CADDR2, TLB_CTX_KERNEL, tte);
+
+	bcopy((void *)CADDR1, (void *)CADDR2, PAGE_SIZE);
+
+	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR1);
+	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, CADDR2);
+}
+
+/*
+ * Zero a page of physical memory by temporarily mapping it into the tlb.
+ */
+void
+pmap_zero_page(vm_offset_t pa)
+{
+	struct tte tte;
+	vm_offset_t va;
+
+	va = CADDR2;
+	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+	tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
+	tlb_store(TLB_DTLB, va, TLB_CTX_KERNEL, tte);
+	bzero((void *)va, PAGE_SIZE);
+	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
+}
 
 void
@@ -695,26 +756,6 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
 	return (0);
 }
 
-boolean_t
-pmap_is_modified(vm_page_t m)
-{
-	TODO;
-	return (0);
-}
-
-void
-pmap_clear_reference(vm_page_t m)
-{
-	TODO;
-}
-
-int
-pmap_ts_referenced(vm_page_t m)
-{
-	TODO;
-	return (0);
-}
-
 vm_offset_t
 pmap_kextract(vm_offset_t va)
 {
@@ -744,14 +785,18 @@ pmap_page_exists(pmap_t pmap, vm_page_t m)
 }
 
 void
-pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
+pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
 {
+	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+	    ("pmap_prefault: non current pmap"));
 	/* XXX */
 }
 
 void
-pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
+	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+	    ("pmap_protect: non current pmap"));
 	/* XXX */
 }
@@ -770,15 +815,16 @@ pmap_reference(pmap_t pm)
 }
 
 void
-pmap_release(pmap_t pmap)
+pmap_release(pmap_t pm)
 {
 	/* XXX */
 }
 
 void
-pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
+
+	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
+	    ("pmap_remove_pages: non current pmap"));
 	/* XXX */
 }
@@ -107,16 +107,14 @@ pv_bit_clear(vm_page_t m, u_long bits)
 	for (pstp = pvh_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
 		tte = pv_get_tte(pstp);
 		KASSERT(TD_PA(tte.tte_data) == pa,
-		    ("pmap_bit_clear: corrupt alias chain"));
+		    ("pv_bit_clear: corrupt alias chain"));
 		if ((tte.tte_data & bits) == 0)
 			continue;
 		va = tte_get_va(tte);
-		if (bits == TD_W && !pmap_track_modified(va))
+		if (bits & (TD_W | TD_SW) && !pmap_track_modified(va))
 			continue;
-		if (bits == TD_W && tte.tte_data & TD_MOD) {
+		if (bits & (TD_W | TD_SW) && tte.tte_data & TD_W)
 			vm_page_dirty(m);
-			bits |= TD_MOD;
-		}
 		pv_atomic_bit_clear(pstp, bits);
 #ifdef notyet
 		generation = pv_generation;
@@ -133,6 +131,44 @@ pv_bit_clear(vm_page_t m, u_long bits)
 	PV_UNLOCK();
 }
 
+int
+pv_bit_count(vm_page_t m, u_long bits)
+{
+	vm_offset_t pstp;
+	vm_offset_t pa;
+	vm_offset_t pvh;
+	struct tte tte;
+	int count;
+
+	count = 0;
+	pa = VM_PAGE_TO_PHYS(m);
+	pvh = pv_lookup(pa);
+	PV_LOCK();
+#ifdef notyet
+restart:
+#endif
+	for (pstp = pvh_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
+		tte = pv_get_tte(pstp);
+		KASSERT(TD_PA(tte.tte_data) == pa,
+		    ("pv_bit_count: corrupt alias chain"));
+		if (tte.tte_data & bits)
+			count++;
+		pv_atomic_bit_clear(pstp, bits);
+#ifdef notyet
+		generation = pv_generation;
+		PV_UNLOCK();
+		ipi_all(IPI_TLB_PAGE_DEMAP);
+		PV_LOCK();
+		if (generation != pv_generation)
+			goto restart;
+#else
+		tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), tte_get_va(tte));
+#endif
+	}
+	PV_UNLOCK();
+	return (count);
+}
+
 void
 pv_bit_set(vm_page_t m, u_long bits)
 {
@@ -150,7 +186,7 @@ pv_bit_set(vm_page_t m, u_long bits)
 	for (pstp = pvh_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
 		tte = pv_get_tte(pstp);
 		KASSERT(TD_PA(tte.tte_data) == pa,
-		    ("pmap_bit_set: corrupt alias chain"));
+		    ("pv_bit_set: corrupt alias chain"));
 		if (tte.tte_data & bits)
 			continue;
 		pv_atomic_bit_set(pstp, bits);
@@ -222,15 +258,3 @@ pv_local_remove_all(vm_offset_t pvh)
 	}
 	PV_UNLOCK();
 }
-
-void
-pv_dump(vm_offset_t pvh)
-{
-	vm_offset_t pstp;
-
-	printf("pv_dump: pvh=%#lx first=%#lx\n", pvh, pvh_get_first(pvh));
-	for (pstp = pvh_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
-		printf("\tpstp=%#lx next=%#lx prev=%#lx\n", pstp,
-		    pv_get_next(pstp), pv_get_prev(pstp));
-	printf("pv_dump: done\n");
-}
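
Annotation, not part of the commit: pv_bit_count() added above both counts
and clears the requested bits, demapping each mapping so the hardware can set
the bit again on the next access. Its only caller in this commit is
pmap_ts_referenced(), repeated here condensed from the pmap.c hunk earlier:

int
pmap_ts_referenced(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return (0);
	return (pv_bit_count(m, TD_REF));
}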
@@ -41,8 +41,10 @@
  */
 
 #include "opt_ddb.h"
+#include "opt_ktr.h"
 
 #include <sys/param.h>
+#include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/systm.h>
@@ -77,6 +79,10 @@ void trap(struct trapframe *tf);
 int trap_mmu_fault(struct proc *p, struct trapframe *tf);
 void syscall(struct proc *p, struct trapframe *tf, u_int sticks);
 
+u_long trap_mask = 0xffffffffffffffffL & ~(1 << T_INTR);
+
+extern char *syscallnames[];
+
 const char *trap_msg[] = {
 	"reserved",
 	"power on reset",
@@ -106,11 +112,12 @@ const char *trap_msg[] = {
 	"fast instruction access mmu miss",
 	"fast data access mmu miss",
 	"fast data access protection",
+	"clock",
 	"bad spill",
 	"bad fill",
 	"spill",
 	"fill",
 	"fill",
 	"breakpoint",
 	"syscall",
 	"trap instruction",
 };
 
 void
@@ -127,44 +134,114 @@ trap(struct trapframe *tf)
 	KASSERT(PCPU_GET(curproc) != NULL, ("trap: curproc NULL"));
 	KASSERT(PCPU_GET(curpcb) != NULL, ("trap: curpcb NULL"));
 
 	error = 0;
 	p = PCPU_GET(curproc);
-	type = T_TYPE(tf->tf_type);
+	type = tf->tf_type;
 	ucode = type;	/* XXX */
 	sticks = 0;
 
-	if ((type & T_KERNEL) == 0)
+#if KTR_COMPILE & KTR_TRAP
+	if (trap_mask & (1 << (type & ~T_KERNEL))) {
+		CTR5(KTR_TRAP, "trap: %s type=%s (%s) ws=%#lx ow=%#lx",
+		    p->p_comm, trap_msg[type & ~T_KERNEL],
+		    ((type & T_KERNEL) ? "kernel" : "user"),
+		    rdpr(wstate), rdpr(otherwin));
+	}
+#endif
+
+	if (type == T_SYSCALL)
+		cnt.v_syscall++;
+	else if ((type & ~T_KERNEL) == T_INTR)
+		cnt.v_intr++;
+	else
+		cnt.v_trap++;
+
+	if ((type & T_KERNEL) == 0) {
 		sticks = p->p_sticks;
+		p->p_frame = tf;
+	}
 
 	switch (type) {
+	case T_ALIGN:
+	case T_ALIGN_LDDF:
+	case T_ALIGN_STDF:
+		sig = SIGBUS;
+		goto trapsig;
+	case T_BREAKPOINT:
+		sig = SIGTRAP;
+		goto trapsig;
+	case T_DIVIDE:
+		sig = SIGFPE;
+		goto trapsig;
 	case T_FP_DISABLED:
 		if (fp_enable_proc(p))
 			goto user;
 		else {
+			/* Fallthrough. */
+	case T_FP_IEEE:
+	case T_FP_OTHER:
 			sig = SIGFPE;
 			goto trapsig;
 		}
 		break;
-	case T_IMMU_MISS:
+	case T_DATA_ERROR:
+	case T_DATA_EXCPTN:
+	case T_INSN_ERROR:
+	case T_INSN_EXCPTN:
+		sig = SIGILL;	/* XXX */
+		goto trapsig;
 	case T_DMMU_MISS:
 	case T_DMMU_PROT:
-		mtx_lock(&Giant);
+	case T_IMMU_MISS:
 		error = trap_mmu_fault(p, tf);
-		mtx_unlock(&Giant);
 		if (error == 0)
 			goto user;
-		break;
+		sig = error;
+		goto trapsig;
+	case T_FILL:
+		if (rwindow_load(p, tf, 2))
+			sigexit(p, SIGILL);
+		goto out;
+	case T_FILL_RET:
+		if (rwindow_load(p, tf, 1))
+			sigexit(p, SIGILL);
+		goto out;
+	case T_INSN_ILLEGAL:
+		sig = SIGILL;
+		goto trapsig;
 	case T_INTR:
-		intr_dispatch(T_LEVEL(tf->tf_type), tf);
-		goto user;
+		intr_dispatch(tf->tf_arg, tf);
+		goto out;
+	case T_PRIV_ACTION:
+	case T_PRIV_OPCODE:
+		sig = SIGBUS;
+		goto trapsig;
+	case T_SOFT:
+		sig = SIGILL;
+		goto trapsig;
+	case T_SPILL:
+		if (rwindow_save(p))
+			sigexit(p, SIGILL);
+		goto out;
 	case T_SYSCALL:
 		/* syscall() calls userret(), so we need goto out; */
 		syscall(p, tf, sticks);
 		goto out;
+	case T_TAG_OVFLW:
+		sig = SIGEMT;
+		goto trapsig;
 #ifdef DDB
 	case T_BREAKPOINT | T_KERNEL:
 		if (kdb_trap(tf) != 0)
 			goto out;
 		break;
 #endif
+	case T_DMMU_MISS | T_KERNEL:
+	case T_DMMU_PROT | T_KERNEL:
+		error = trap_mmu_fault(p, tf);
+		if (error == 0)
+			goto out;
+		break;
+	case T_INTR | T_KERNEL:
+		intr_dispatch(tf->tf_arg, tf);
+		goto out;
 	case T_WATCH_VIRT | T_KERNEL:
 		/*
 		 * At the moment, just print the information from the trap,
@@ -204,17 +281,6 @@ trap(struct trapframe *tf)
 		*(u_int *)tf->tf_tpc = PCPU_GET(wp_insn);
 		flush(tf->tf_tpc);
 		goto out;
-	case T_DMMU_MISS | T_KERNEL:
-	case T_DMMU_PROT | T_KERNEL:
-		mtx_lock(&Giant);
-		error = trap_mmu_fault(p, tf);
-		mtx_unlock(&Giant);
-		if (error == 0)
-			goto out;
-		break;
-	case T_INTR | T_KERNEL:
-		intr_dispatch(T_LEVEL(tf->tf_type), tf);
-		goto out;
 	default:
 		break;
 	}
@@ -227,9 +293,12 @@ trap(struct trapframe *tf)
 	trapsignal(p, sig, ucode);
 user:
 	userret(p, tf, sticks);
+	if (mtx_owned(&Giant))
+		mtx_unlock(&Giant);
 out:
+#if KTR_COMPILE & KTR_TRAP
+	if (trap_mask & (1 << (type & ~T_KERNEL))) {
+		CTR1(KTR_TRAP, "trap: p=%p return", p);
+	}
+#endif
 	return;
 }
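
Annotation, not part of the commit: the ktr tracing added to trap() above is
gated by trap_mask, one bit per trap type. All bits start set except T_INTR,
so interrupt traps (by far the most frequent) do not flood the trace buffer;
clearing further bits, e.g. from a debugger, silences other trap types.
TRAP_TRACED() is a hypothetical macro naming the test used inline in the diff:

u_long trap_mask = 0xffffffffffffffffL & ~(1 << T_INTR);

#define	TRAP_TRACED(type)	((trap_mask & (1 << ((type) & ~T_KERNEL))) != 0)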
@@ -238,73 +307,80 @@ trap_mmu_fault(struct proc *p, struct trapframe *tf)
 {
 	struct mmuframe *mf;
 	struct vmspace *vm;
+	struct stte *stp;
+	struct pcb *pcb;
+	struct tte tte;
 	vm_offset_t va;
-	vm_prot_t type;
+	vm_prot_t prot;
+	u_long ctx;
+	pmap_t pm;
+	int flags;
+	int type;
 	int rv;
 
 	KASSERT(p->p_vmspace != NULL, ("trap_dmmu_miss: vmspace NULL"));
 
-	type = 0;
-	rv = KERN_FAILURE;
-	mf = tf->tf_arg;
-	va = TLB_TAR_VA(mf->mf_tar);
-	switch (tf->tf_type) {
-	case T_DMMU_MISS | T_KERNEL:
-		/*
-		 * If the context is nucleus this is a soft fault on kernel
-		 * memory, just fault in the pages.
-		 */
-		if (TLB_TAR_CTX(mf->mf_tar) == TLB_CTX_KERNEL) {
-			rv = vm_fault(kernel_map, va, VM_PROT_READ,
-			    VM_FAULT_NORMAL);
-			break;
-		}
-
-		/*
-		 * Don't allow kernel mode faults on user memory unless
-		 * pcb_onfault is set.
-		 */
-		if (PCPU_GET(curpcb)->pcb_onfault == NULL)
-			break;
-		/* Fallthrough. */
-	case T_IMMU_MISS:
-	case T_DMMU_MISS:
-		/*
-		 * First try the tsb. The primary tsb was already searched.
-		 */
-		vm = p->p_vmspace;
-		if (tsb_miss(&vm->vm_pmap, tf->tf_type, mf) == 0) {
-			rv = KERN_SUCCESS;
-			break;
-		}
-
-		/*
-		 * Not found, call the vm system.
-		 */
-		if (tf->tf_type == T_IMMU_MISS)
-			type = VM_PROT_EXECUTE | VM_PROT_READ;
-		else
-			type = VM_PROT_READ;
+	mf = (struct mmuframe *)tf->tf_arg;
+	ctx = TLB_TAR_CTX(mf->mf_tar);
+	pcb = PCPU_GET(curpcb);
+	type = tf->tf_type & ~T_KERNEL;
+	va = TLB_TAR_VA(mf->mf_tar);
+
+	CTR4(KTR_TRAP, "trap_mmu_fault: p=%p pm_ctx=%#lx va=%#lx ctx=%#lx",
+	    p, p->p_vmspace->vm_pmap.pm_context, va, ctx);
+
+	if (type == T_DMMU_PROT) {
+		prot = VM_PROT_WRITE;
+		flags = VM_FAULT_DIRTY;
+	} else {
+		if (type == T_DMMU_MISS)
+			prot = VM_PROT_READ;
+		else
+			prot = VM_PROT_READ | VM_PROT_EXECUTE;
+		flags = VM_FAULT_NORMAL;
+	}
+
+	if (ctx == TLB_CTX_KERNEL) {
+		mtx_lock(&Giant);
+		rv = vm_fault(kernel_map, va, prot, VM_FAULT_NORMAL);
+		mtx_unlock(&Giant);
+		if (rv == KERN_SUCCESS) {
+			stp = tsb_kvtostte(va);
+			tte = stp->st_tte;
+			if (type == T_IMMU_MISS)
+				tlb_store(TLB_DTLB | TLB_ITLB, va, ctx, tte);
+			else
+				tlb_store(TLB_DTLB, va, ctx, tte);
+		}
+	} else if (tf->tf_type & T_KERNEL &&
+	    (p->p_intr_nesting_level != 0 || pcb->pcb_onfault == NULL)) {
+		rv = KERN_FAILURE;
+	} else {
+		mtx_lock(&Giant);
+		vm = p->p_vmspace;
+		pm = &vm->vm_pmap;
+		stp = tsb_stte_lookup(pm, va);
+		if (stp == NULL || type == T_DMMU_PROT) {
 			/*
-			 * Keep the process from being swapped out at this critical
-			 * time.
+			 * Keep the process from being swapped out at this
+			 * critical time.
 			 */
 			PROC_LOCK(p);
 			++p->p_lock;
 			PROC_UNLOCK(p);
 
 			/*
-			 * Grow the stack if necessary. vm_map_growstack only fails
-			 * if the va falls into a growable stack region and the stack
-			 * growth fails. If it succeeds, or the va was not within a
-			 * growable stack region, fault in the user page.
+			 * Grow the stack if necessary. vm_map_growstack only
+			 * fails if the va falls into a growable stack region
+			 * and the stack growth fails. If it succeeds, or the
+			 * va was not within a growable stack region, fault in
+			 * the user page.
 			 */
 			if (vm_map_growstack(p, va) != KERN_SUCCESS)
 				rv = KERN_FAILURE;
 			else
-				rv = vm_fault(&vm->vm_map, va, type, VM_FAULT_NORMAL);
+				rv = vm_fault(&vm->vm_map, va, prot, flags);
 
 			/*
 			 * Now the process can be swapped again.
@@ -312,42 +388,26 @@ trap_mmu_fault(struct proc *p, struct trapframe *tf)
 			PROC_LOCK(p);
 			--p->p_lock;
 			PROC_UNLOCK(p);
-		break;
-	case T_DMMU_PROT | T_KERNEL:
-		/*
-		 * Protection faults should not happen on kernel memory.
-		 */
-		if (TLB_TAR_CTX(mf->mf_tar) == TLB_CTX_KERNEL)
-			break;
-
-		/*
-		 * Don't allow kernel mode faults on user memory unless
-		 * pcb_onfault is set.
-		 */
-		if (PCPU_GET(curpcb)->pcb_onfault == NULL)
-			break;
-		/* Fallthrough. */
-	case T_DMMU_PROT:
-		/*
-		 * Only look in the tsb. Write access to an unmapped page
-		 * causes a miss first, so the page must have already been
-		 * brought in by vm_fault, we just need to find the tte and
-		 * update the write bit. XXX How do we tell them vm system
-		 * that we are now writing?
-		 */
-		vm = p->p_vmspace;
-		if (tsb_miss(&vm->vm_pmap, tf->tf_type, mf) == 0)
-			rv = KERN_SUCCESS;
-		break;
-	default:
-		break;
+		} else if (type == T_IMMU_MISS) {
+			if ((stp->st_tte.tte_data & TD_EXEC) == 0)
+				rv = KERN_FAILURE;
+			else
+				tlb_store(TLB_DTLB | TLB_ITLB, va, ctx,
+				    stp->st_tte);
+		} else if (type == T_DMMU_PROT &&
+		    (stp->st_tte.tte_data & TD_SW) == 0) {
+			rv = KERN_FAILURE;
+		} else {
+			tlb_store(TLB_DTLB, va, ctx, stp->st_tte);
+		}
+		mtx_unlock(&Giant);
 	}
+	CTR3(KTR_TRAP, "trap_mmu_fault: return p=%p va=%#lx rv=%d", p, va, rv);
 	if (rv == KERN_SUCCESS)
 		return (0);
 	if (tf->tf_type & T_KERNEL) {
-		if (PCPU_GET(curpcb)->pcb_onfault != NULL &&
-		    TLB_TAR_CTX(mf->mf_tar) != TLB_CTX_KERNEL) {
-			tf->tf_tpc = (u_long)PCPU_GET(curpcb)->pcb_onfault;
+		if (pcb->pcb_onfault != NULL && ctx != TLB_CTX_KERNEL) {
+			tf->tf_tpc = (u_long)pcb->pcb_onfault;
 			tf->tf_tnpc = tf->tf_tpc + 4;
 			return (0);
 		}
@@ -374,14 +434,13 @@ syscall(struct proc *p, struct trapframe *tf, u_int sticks)
 	int narg;
 	int error;
 	register_t args[8];
-	void *argp;
+	register_t *argp;
 
 	narg = 0;
 	error = 0;
 	reg = 0;
 	regcnt = REG_MAXARGS;
 	code = tf->tf_global[1];
-	atomic_add_int(&cnt.v_syscall, 1);
 	/*
 	 * For syscalls, we don't want to retry the faulting instruction
 	 * (usually), instead we need to advance one instruction.
@@ -426,6 +485,9 @@ syscall(struct proc *p, struct trapframe *tf, u_int sticks)
 		goto bad;
 	}
 
+	CTR5(KTR_SYSC, "syscall: p=%p %s(%#lx, %#lx, %#lx)", p,
+	    syscallnames[code], argp[0], argp[1], argp[2]);
+
 	/*
 	 * Try to run the syscall without the MP lock if the syscall
 	 * is MP safe.
@@ -449,6 +511,9 @@ syscall(struct proc *p, struct trapframe *tf, u_int sticks)
 
 	error = (*callp->sy_call)(p, argp);
 
+	CTR5(KTR_SYSC, "syscall: p=%p error=%d %s return %#lx %#lx ", p,
+	    error, syscallnames[code], p->p_retval[0], p->p_retval[1]);
+
 	/*
 	 * MP SAFE (we may or may not have the MP lock at this point)
 	 */
@@ -91,47 +91,6 @@ tsb_get_bucket(pmap_t pm, u_int level, vm_offset_t va, int allocate)
 	return (bucket);
 }
 
-int
-tsb_miss(pmap_t pm, u_int type, struct mmuframe *mf)
-{
-	struct stte *stp;
-	struct tte tte;
-	vm_offset_t va;
-	u_long ctx;
-
-	va = TLB_TAR_VA(mf->mf_tar);
-	ctx = TLB_TAR_CTX(mf->mf_tar);
-	if ((stp = tsb_stte_lookup(pm, va)) == NULL)
-		return (EFAULT);
-	switch (type) {
-	case T_IMMU_MISS:
-		if ((stp->st_tte.tte_data & TD_EXEC) == 0)
-			return (EFAULT);
-		stp->st_tte.tte_data |= TD_REF;
-		tlb_store(TLB_DTLB | TLB_ITLB, va, ctx, stp->st_tte);
-		break;
-	case T_DMMU_MISS:
-	case T_DMMU_MISS | T_KERNEL:
-		stp->st_tte.tte_data |= TD_REF;
-		tte = stp->st_tte;
-		if ((tte.tte_data & TD_MOD) == 0)
-			tte.tte_data &= ~TD_W;
-		tlb_store(TLB_DTLB, va, ctx, tte);
-		break;
-	case T_DMMU_PROT:
-	case T_DMMU_PROT | T_KERNEL:
-		if ((stp->st_tte.tte_data & TD_W) == 0)
-			return (EFAULT);
-		tlb_page_demap(TLB_DTLB, ctx, va);
-		stp->st_tte.tte_data |= TD_MOD;
-		tlb_store(TLB_DTLB, va, ctx, stp->st_tte);
-		break;
-	default:
-		return (EFAULT);
-	}
-	return (0);
-}
-
 struct tte
 tsb_page_alloc(pmap_t pm, vm_offset_t va)
 {
@@ -145,7 +104,7 @@ tsb_page_alloc(pmap_t pm, vm_offset_t va)
 	pa = VM_PAGE_TO_PHYS(m);
 	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
 	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_L |
-	    TD_MOD | TD_REF | TD_CP | TD_P | TD_W;
+	    TD_CP | TD_P | TD_W;
 	return (tte);
 }
 
@@ -178,7 +137,7 @@ tsb_page_init(void *va, int level)
 	for (i = 0; i < PAGE_SIZE; i += inc) {
 		p = (caddr_t)va + i;
 		stp = (struct stte *)p + bsize - 1;
-		stp->st_tte.tte_data = TD_TSB;
+		stp->st_tte.tte_data = TD_L;
 	}
 }
 
@@ -186,22 +145,48 @@ struct stte *
 tsb_stte_lookup(pmap_t pm, vm_offset_t va)
 {
 	struct stte *bucket;
+	struct stte *stp;
 	u_int level;
 	u_int i;
 
-	if (pm == kernel_pmap)
-		return tsb_kvtostte(va);
+	CTR5(KTR_CT1,
+	    "tsb_stte_lookup: ws=%#lx ow=%#lx cr=%#lx cs=%#lx cwp=%#lx",
+	    rdpr(wstate), rdpr(otherwin), rdpr(canrestore), rdpr(cansave),
+	    rdpr(cwp));
+
+	if (pm == kernel_pmap) {
+		stp = tsb_kvtostte(va);
+		CTR3(KTR_CT1,
+		    "tsb_stte_lookup: kernel va=%#lx stp=%#lx data=%#lx",
+		    va, stp, stp->st_tte.tte_data);
+		if (tte_match(stp->st_tte, va)) {
+			CTR1(KTR_CT1, "tsb_stte_lookup: match va=%#lx", va);
+			return (stp);
+		}
+		goto out;
+	}
+
+	CTR2(KTR_CT1, "tsb_stte_lookup: ctx=%#lx va=%#lx", pm->pm_context, va);
+
 	va = trunc_page(va);
 	for (level = 0; level < TSB_DEPTH; level++) {
 		bucket = tsb_get_bucket(pm, level, va, 0);
+		CTR2(KTR_CT1, "tsb_stte_lookup: lvl=%d b=%p", level, bucket);
 		if (bucket == NULL)
 			break;
 		for (i = 0; i < tsb_bucket_size(level); i++) {
-			if (tte_match(bucket[i].st_tte, va))
-				return (&bucket[i]);
+			if (tte_match(bucket[i].st_tte, va)) {
+				stp = &bucket[i];
+				CTR2(KTR_CT1,
+				    "tsb_stte_lookup: match va=%#lx stp=%p",
+				    va, stp);
+				return (stp);
+			}
 		}
 	}
+out:
+	CTR2(KTR_CT1, "tsb_stte_lookup: miss ctx=%#lx va=%#lx",
+	    pm->pm_context, va);
 	return (NULL);
 }
 
@@ -222,8 +207,9 @@ tsb_stte_promote(pmap_t pm, vm_offset_t va, struct stte *stp)
 		if ((bucket[i].st_tte.tte_data & TD_V) == 0 ||
 		    (bucket[i].st_tte.tte_data & (TD_L | TD_REF)) == 0) {
 			tte = stp->st_tte;
-			stp->st_tte.tte_data = 0;
+			if (tte.tte_data & TD_MNG)
 				pv_remove_virt(stp);
+			stp->st_tte.tte_data = 0;
 			return (tsb_tte_enter(pm, va, tte));
 		}
 	} while ((i = (i + 1) & bmask) != b0);
@@ -236,8 +222,10 @@ tsb_stte_remove(struct stte *stp)
 	struct tte tte;
 
 	tte = stp->st_tte;
+	if (tte.tte_data & TD_V) {
 		tte_invalidate(&stp->st_tte);
 		tsb_tte_local_remove(&tte);
+	}
 }
 
 void
@@ -264,9 +252,24 @@ tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte)
 	int b0;
 	int i;
 
+	CTR3(KTR_CT1, "tsb_tte_enter: ctx=%#lx va=%#lx data=%#lx",
+	    pm->pm_context, va, tte.tte_data);
+
+	if (pm == kernel_pmap) {
+		stp = tsb_kvtostte(va);
+		if (stp->st_tte.tte_data & TD_MNG)
+			pv_remove_virt(stp);
+		stp->st_tte = tte;
+		if (tte.tte_data & TD_MNG)
+			pv_insert(pm, TD_PA(tte.tte_data), va, stp);
+		return (stp);
+	}
+
 	nstp = NULL;
 	for (level = 0; level < TSB_DEPTH; level++) {
 		bucket = tsb_get_bucket(pm, level, va, 1);
+		CTR3(KTR_CT1, "tsb_tte_enter: va=%#lx bucket=%p level=%d",
+		    va, bucket, level);
 
 		stp = NULL;
 		rstp = NULL;
@@ -274,7 +277,7 @@ tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte)
 		b0 = rd(tick) & bmask;
 		i = b0;
 		do {
-			if ((bucket[i].st_tte.tte_data & (TD_TSB | TD_L)) != 0)
+			if ((bucket[i].st_tte.tte_data & TD_L) != 0)
 				continue;
 			if ((bucket[i].st_tte.tte_data & TD_V) == 0) {
 				stp = &bucket[i];
@@ -296,15 +299,17 @@ tsb_tte_enter(pmap_t pm, vm_offset_t va, struct tte tte)
 		nstp = stp;
 
 		otte = stp->st_tte;
-		if (otte.tte_data & TD_V)
+		if (otte.tte_data & TD_V && otte.tte_data & TD_MNG)
 			pv_remove_virt(stp);
 		stp->st_tte = tte;
+		if (tte.tte_data & TD_MNG)
 			pv_insert(pm, TD_PA(tte.tte_data), va, stp);
 		if ((otte.tte_data & TD_V) == 0)
 			break;
 		tte = otte;
 		va = tte_get_va(tte);
 	}
+	CTR1(KTR_CT1, "tsb_tte_enter: return stp=%p", nstp);
 	if (level >= TSB_DEPTH)
 		panic("tsb_enter_tte: TSB full");
 	return (nstp);