mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-19 10:53:58 +00:00
When injecting a page fault into the guest, also update the guest's %cr2 to
indicate the faulting linear address. If the guest PML4 entry has the PG_PS bit set, then inject a page fault into the guest with the PGEX_RSV bit set in the error code. Get rid of redundant checks for PG_RW violations when walking the page tables.
This commit is contained in:
parent
11d47032ee
commit
37a723a5b3
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=266626
@ -237,7 +237,7 @@ int vm_exception_pending(struct vm *vm, int vcpuid, struct vm_exception *vme);
|
||||
|
||||
void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
|
||||
void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
|
||||
void vm_inject_pf(struct vm *vm, int vcpuid, int error_code); /* page fault */
|
||||
void vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2);
|
||||
|
||||
enum vm_reg_name vm_segment_name(int seg_encoding);
|
||||
|
||||
@ -284,6 +284,7 @@ enum vm_reg_name {
|
||||
VM_REG_GUEST_IDTR,
|
||||
VM_REG_GUEST_GDTR,
|
||||
VM_REG_GUEST_EFER,
|
||||
VM_REG_GUEST_CR2,
|
||||
VM_REG_LAST
|
||||
};
|
||||
|
||||
|
@ -2383,6 +2383,8 @@ vmxctx_regptr(struct vmxctx *vmxctx, int reg)
|
||||
return (&vmxctx->guest_r14);
|
||||
case VM_REG_GUEST_R15:
|
||||
return (&vmxctx->guest_r15);
|
||||
case VM_REG_GUEST_CR2:
|
||||
return (&vmxctx->guest_cr2);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1441,13 +1441,20 @@ vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
|
||||
}
|
||||
|
||||
void
|
||||
vm_inject_pf(struct vm *vm, int vcpuid, int error_code)
|
||||
vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2)
|
||||
{
|
||||
struct vm_exception pf = {
|
||||
.vector = IDT_PF,
|
||||
.error_code_valid = 1,
|
||||
.error_code = error_code
|
||||
};
|
||||
int error;
|
||||
|
||||
VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
|
||||
error_code, cr2);
|
||||
|
||||
error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
|
||||
KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
|
||||
|
||||
vm_inject_fault(vm, vcpuid, &pf);
|
||||
}
|
||||
|
@ -599,7 +599,7 @@ vie_init(struct vie *vie)
|
||||
}
|
||||
|
||||
static int
|
||||
pf_error_code(int usermode, int prot, uint64_t pte)
|
||||
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
|
||||
{
|
||||
int error_code = 0;
|
||||
|
||||
@ -609,6 +609,8 @@ pf_error_code(int usermode, int prot, uint64_t pte)
|
||||
error_code |= PGEX_W;
|
||||
if (usermode)
|
||||
error_code |= PGEX_U;
|
||||
if (rsvd)
|
||||
error_code |= PGEX_RSV;
|
||||
if (prot & VM_PROT_EXECUTE)
|
||||
error_code |= PGEX_I;
|
||||
|
||||
@ -679,14 +681,12 @@ vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t ptpphys,
|
||||
if ((pte32 & PG_V) == 0 ||
|
||||
(usermode && (pte32 & PG_U) == 0) ||
|
||||
(writable && (pte32 & PG_RW) == 0)) {
|
||||
pfcode = pf_error_code(usermode, prot, pte32);
|
||||
vm_inject_pf(vm, vcpuid, pfcode);
|
||||
pfcode = pf_error_code(usermode, prot, 0,
|
||||
pte32);
|
||||
vm_inject_pf(vm, vcpuid, pfcode, gla);
|
||||
goto pagefault;
|
||||
}
|
||||
|
||||
if (writable && (pte32 & PG_RW) == 0)
|
||||
goto error;
|
||||
|
||||
/*
|
||||
* Emulate the x86 MMU's management of the accessed
|
||||
* and dirty flags. While the accessed flag is set
|
||||
@ -735,8 +735,8 @@ vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t ptpphys,
|
||||
pte = ptpbase[ptpindex];
|
||||
|
||||
if ((pte & PG_V) == 0) {
|
||||
pfcode = pf_error_code(usermode, prot, pte);
|
||||
vm_inject_pf(vm, vcpuid, pfcode);
|
||||
pfcode = pf_error_code(usermode, prot, 0, pte);
|
||||
vm_inject_pf(vm, vcpuid, pfcode, gla);
|
||||
goto pagefault;
|
||||
}
|
||||
|
||||
@ -762,14 +762,11 @@ vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t ptpphys,
|
||||
if ((pte & PG_V) == 0 ||
|
||||
(usermode && (pte & PG_U) == 0) ||
|
||||
(writable && (pte & PG_RW) == 0)) {
|
||||
pfcode = pf_error_code(usermode, prot, pte);
|
||||
vm_inject_pf(vm, vcpuid, pfcode);
|
||||
pfcode = pf_error_code(usermode, prot, 0, pte);
|
||||
vm_inject_pf(vm, vcpuid, pfcode, gla);
|
||||
goto pagefault;
|
||||
}
|
||||
|
||||
if (writable && (pte & PG_RW) == 0)
|
||||
goto error;
|
||||
|
||||
/* Set the accessed bit in the page table entry */
|
||||
if ((pte & PG_A) == 0) {
|
||||
if (atomic_cmpset_64(&ptpbase[ptpindex],
|
||||
@ -779,10 +776,12 @@ vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t ptpphys,
|
||||
}
|
||||
|
||||
if (nlevels > 0 && (pte & PG_PS) != 0) {
|
||||
if (pgsize > 1 * GB)
|
||||
goto error;
|
||||
else
|
||||
break;
|
||||
if (pgsize > 1 * GB) {
|
||||
pfcode = pf_error_code(usermode, prot, 1, pte);
|
||||
vm_inject_pf(vm, vcpuid, pfcode, gla);
|
||||
goto pagefault;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
ptpphys = pte;
|
||||
|
Loading…
Reference in New Issue
Block a user