Restructure the MSR handling so it is entirely handled by processor-specific
code.

There are only a handful of MSRs common between the two (the VT-x and SVM
implementations) so there isn't too much duplicate functionality.

The VT-x code has the following types of MSRs:

- MSRs that are unconditionally saved/restored on every guest/host context
  switch (e.g., MSR_GSBASE).

- MSRs that are restored to guest values on entry to vmx_run() and saved
  before returning. This is an optimization for MSRs that are not used in
  host kernel context (e.g., MSR_KGSBASE).

- MSRs that are emulated and every access by the guest causes a trap into
  the hypervisor (e.g., MSR_IA32_MISC_ENABLE).

Reviewed by:	grehan
parent 17bb5fd106
commit c3498942a5

Notes (svn2git):
    svn path=/head/; revision=271888
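
The three classes map onto the new vmx_msr.c entry points. The sketch below is
illustrative only (vmx_msr_flow_sketch is a hypothetical name); the real
definitions appear in the vmx_msr.c hunk further down:

    /*
     * Illustrative sketch only (not part of the commit): how the three MSR
     * classes from the commit message relate to the new vmx_msr.c entry
     * points.
     */
    static void
    vmx_msr_flow_sketch(struct vmx *vmx, int vcpu)
    {
        /*
         * Class 2 MSRs (e.g. MSR_KGSBASE) are loaded with guest values
         * here and stay loaded while the guest runs, because the host
         * kernel does not use them in kernel context.
         */
        vmx_msr_guest_enter(vmx, vcpu);

        /*
         * While the guest runs, class 1 MSRs (e.g. MSR_GSBASE) are
         * switched automatically on every VM entry/exit by the VMCS.
         * Class 3 MSRs (e.g. MSR_IA32_MISC_ENABLE) never reach hardware:
         * a guest access traps and is handled by vmx_rdmsr()/vmx_wrmsr().
         */

        /* Save the guest's class 2 values and reload the host's. */
        vmx_msr_guest_exit(vmx, vcpu);
    }
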
@@ -195,7 +195,6 @@ void vm_nmi_clear(struct vm *vm, int vcpuid);
 int vm_inject_extint(struct vm *vm, int vcpu);
 int vm_extint_pending(struct vm *vm, int vcpuid);
 void vm_extint_clear(struct vm *vm, int vcpuid);
-uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
 struct vlapic *vm_lapic(struct vm *vm, int cpu);
 struct vioapic *vm_ioapic(struct vm *vm);
 struct vhpet *vm_hpet(struct vm *vm);
@@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");

 #include "vmx_cpufunc.h"
 #include "vmm_ipi.h"
-#include "vmx_msr.h"
 #include "ept.h"

 #define EPT_SUPPORTS_EXEC_ONLY(cap) ((cap) & (1UL << 0))
@@ -54,6 +54,10 @@ int vmcs_getdesc(struct vmcs *vmcs, int running, int ident,
 int vmcs_setdesc(struct vmcs *vmcs, int running, int ident,
                  struct seg_desc *desc);

+/*
+ * Avoid header pollution caused by inline use of 'vtophys()' in vmx_cpufunc.h
+ */
+#ifdef _VMX_CPUFUNC_H_
 static __inline uint64_t
 vmcs_read(uint32_t encoding)
 {
@@ -73,6 +77,7 @@ vmcs_write(uint32_t encoding, uint64_t val)
     error = vmwrite(encoding, val);
     KASSERT(error == 0, ("vmcs_write(%u) error %d", encoding, error));
 }
+#endif /* _VMX_CPUFUNC_H_ */

 #define vmexit_instruction_length() vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH)
 #define vmcs_guest_rip()            vmcs_read(VMCS_GUEST_RIP)
@@ -52,20 +52,20 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmm.h>
 #include <machine/vmm_dev.h>
 #include <machine/vmm_instruction_emul.h>
 #include "vmm_lapic.h"
 #include "vmm_host.h"
 #include "vmm_ioport.h"
 #include "vmm_ipi.h"
-#include "vmm_msr.h"
 #include "vmm_ktr.h"
 #include "vmm_stat.h"
 #include "vatpic.h"
 #include "vlapic.h"
 #include "vlapic_priv.h"

-#include "vmx_msr.h"
 #include "ept.h"
 #include "vmx_cpufunc.h"
 #include "vmx.h"
+#include "vmx_msr.h"
 #include "x86.h"
 #include "vmx_controls.h"
@ -116,12 +116,6 @@ __FBSDID("$FreeBSD$");
|
||||
VM_ENTRY_INTO_SMM | \
|
||||
VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
|
||||
|
||||
#define guest_msr_rw(vmx, msr) \
|
||||
msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
|
||||
|
||||
#define guest_msr_ro(vmx, msr) \
|
||||
msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
|
||||
|
||||
#define HANDLED 1
|
||||
#define UNHANDLED 0
|
||||
|
||||
@@ -208,6 +202,7 @@ SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,

 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
+static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
 static void vmx_inject_pir(struct vlapic *vlapic);

 #ifdef KTR
@@ -474,22 +469,6 @@ vpid_init(void)
     vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
 }

-static void
-msr_save_area_init(struct msr_entry *g_area, int *g_count)
-{
-    int cnt;
-
-    static struct msr_entry guest_msrs[] = {
-        { MSR_KGSBASE, 0, 0 },
-    };
-
-    cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
-    if (cnt > GUEST_MSR_MAX_ENTRIES)
-        panic("guest msr save area overrun");
-    bcopy(guest_msrs, g_area, sizeof(guest_msrs));
-    *g_count = cnt;
-}
-
 static void
 vmx_disable(void *arg __unused)
 {
@@ -655,7 +634,6 @@ vmx_init(int ipinum)
         } else {
             if (bootverbose)
                 printf("vmm: PAT MSR access not supported\n");
-            guest_msr_valid(MSR_PAT);
             vmx_patmsr = 0;
         }
     }
@@ -800,6 +778,8 @@ vmx_init(int ipinum)

     vpid_init();

+    vmx_msr_init();
+
     /* enable VMX operation */
     smp_rendezvous(NULL, vmx_enable, NULL, NULL);

@@ -869,7 +849,7 @@ static void *
 vmx_vminit(struct vm *vm, pmap_t pmap)
 {
     uint16_t vpid[VM_MAXCPU];
-    int i, error, guest_msr_count;
+    int i, error;
     struct vmx *vmx;
     struct vmcs *vmcs;

@@ -958,6 +938,8 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
                 error, i);
         }

+        vmx_msr_guest_init(vmx, i);
+
         error = vmcs_init(vmcs);
         KASSERT(error == 0, ("vmcs_init error %d", error));

@@ -996,13 +978,6 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
         vmx->state[i].lastcpu = NOCPU;
         vmx->state[i].vpid = vpid[i];

-        msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
-
-        error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
-            guest_msr_count);
-        if (error != 0)
-            panic("vmcs_set_msr_save error %d", error);
-
         /*
          * Set up the CR0/4 shadows, and init the read shadow
          * to the power-on register value from the Intel Sys Arch.
@@ -2077,6 +2052,46 @@ vmx_task_switch_reason(uint64_t qual)
     }
 }

+static int
+emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+    int error;
+
+    if (lapic_msr(num))
+        error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
+    else
+        error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
+
+    return (error);
+}
+
+static int
+emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
+{
+    struct vmxctx *vmxctx;
+    uint64_t result;
+    uint32_t eax, edx;
+    int error;
+
+    if (lapic_msr(num))
+        error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
+    else
+        error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
+
+    if (error == 0) {
+        eax = result;
+        vmxctx = &vmx->ctx[vcpuid];
+        error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
+        KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
+
+        edx = result >> 32;
+        error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
+        KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
+    }
+
+    return (error);
+}
+
 static int
 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 {
@@ -2215,7 +2230,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
         retu = false;
         ecx = vmxctx->guest_rcx;
         VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
-        error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
+        error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
         if (error) {
             vmexit->exitcode = VM_EXITCODE_RDMSR;
             vmexit->u.msr.code = ecx;
@ -2224,7 +2239,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
|
||||
} else {
|
||||
/* Return to userspace with a valid exitcode */
|
||||
KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
|
||||
("emulate_wrmsr retu with bogus exitcode"));
|
||||
("emulate_rdmsr retu with bogus exitcode"));
|
||||
}
|
||||
break;
|
||||
case EXIT_REASON_WRMSR:
|
||||
@@ -2235,7 +2250,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
         edx = vmxctx->guest_rdx;
         VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
             ecx, (uint64_t)edx << 32 | eax);
-        error = emulate_wrmsr(vmx->vm, vcpu, ecx,
+        error = emulate_wrmsr(vmx, vcpu, ecx,
             (uint64_t)edx << 32 | eax, &retu);
         if (error) {
             vmexit->exitcode = VM_EXITCODE_WRMSR;
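
For reference, a sketch (not part of the commit) of the rdmsr/wrmsr register
convention that the emulate_rdmsr()/emulate_wrmsr() plumbing above implements,
with a hypothetical helper name:

    /*
     * RDMSR hands the 64-bit MSR value to the guest split across EDX:EAX;
     * WRMSR recombines them, as the exit handler above does with
     * (uint64_t)edx << 32 | eax.
     */
    static void
    msr_split_sketch(uint64_t result, uint32_t *eax, uint32_t *edx)
    {
        *eax = (uint32_t)result;         /* low 32 bits land in guest %rax */
        *edx = (uint32_t)(result >> 32); /* high 32 bits land in guest %rdx */
    }
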
@@ -2523,6 +2538,8 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
     KASSERT(vmxctx->pmap == pmap,
         ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));

+    vmx_msr_guest_enter(vmx, vcpu);
+
     VMPTRLD(vmcs);

     /*
@ -2624,6 +2641,8 @@ vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
|
||||
vmexit->exitcode);
|
||||
|
||||
VMCLEAR(vmcs);
|
||||
vmx_msr_guest_exit(vmx, vcpu);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
@@ -33,8 +33,6 @@

 struct pmap;

-#define GUEST_MSR_MAX_ENTRIES 64 /* arbitrary */
-
 struct vmxctx {
     register_t guest_rdi; /* Guest state */
     register_t guest_rsi;
@ -97,13 +95,23 @@ struct pir_desc {
|
||||
} __aligned(64);
|
||||
CTASSERT(sizeof(struct pir_desc) == 64);
|
||||
|
||||
/* Index into the 'guest_msrs[]' array */
|
||||
enum {
|
||||
IDX_MSR_LSTAR,
|
||||
IDX_MSR_CSTAR,
|
||||
IDX_MSR_STAR,
|
||||
IDX_MSR_SF_MASK,
|
||||
IDX_MSR_KGSBASE,
|
||||
GUEST_MSR_NUM /* must be the last enumeration */
|
||||
};
|
||||
|
||||
/* virtual machine softc */
|
||||
struct vmx {
|
||||
struct vmcs vmcs[VM_MAXCPU]; /* one vmcs per virtual cpu */
|
||||
struct apic_page apic_page[VM_MAXCPU]; /* one apic page per vcpu */
|
||||
char msr_bitmap[PAGE_SIZE];
|
||||
struct pir_desc pir_desc[VM_MAXCPU];
|
||||
struct msr_entry guest_msrs[VM_MAXCPU][GUEST_MSR_MAX_ENTRIES];
|
||||
uint64_t guest_msrs[VM_MAXCPU][GUEST_MSR_NUM];
|
||||
struct vmxctx ctx[VM_MAXCPU];
|
||||
struct vmxcap cap[VM_MAXCPU];
|
||||
struct vmxstate state[VM_MAXCPU];
|
||||
@@ -113,7 +121,6 @@ struct vmx {
 };
 CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
 CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
-CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);
 CTASSERT((offsetof(struct vmx, pir_desc[0]) & 63) == 0);

 #define VMX_GUEST_VMEXIT 0
@@ -31,10 +31,13 @@ __FBSDID("$FreeBSD$");

 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/cpuset.h>

 #include <machine/cpufunc.h>
+#include <machine/specialreg.h>
 #include <machine/vmm.h>

+#include "vmx.h"
 #include "vmx_msr.h"

 static boolean_t
@@ -171,3 +174,115 @@ msr_bitmap_change_access(char *bitmap, u_int msr, int access)

     return (0);
 }
+
+static uint64_t misc_enable;
+static uint64_t host_msrs[GUEST_MSR_NUM];
+
+void
+vmx_msr_init(void)
+{
+    /*
+     * It is safe to cache the values of the following MSRs because
+     * they don't change based on curcpu, curproc or curthread.
+     */
+    host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+    host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+    host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+    host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+
+    /*
+     * Initialize emulated MSRs
+     */
+    misc_enable = rdmsr(MSR_IA32_MISC_ENABLE);
+    /*
+     * Set mandatory bits
+     *  11: branch trace disabled
+     *  12: PEBS unavailable
+     * Clear unsupported features
+     *  16: SpeedStep enable
+     *  18: enable MONITOR FSM
+     */
+    misc_enable |= (1 << 12) | (1 << 11);
+    misc_enable &= ~((1 << 18) | (1 << 16));
+}
+
+void
+vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
+{
+    /*
+     * The permissions bitmap is shared between all vcpus so initialize it
+     * once when initializing the vBSP.
+     */
+    if (vcpuid == 0) {
+        guest_msr_rw(vmx, MSR_LSTAR);
+        guest_msr_rw(vmx, MSR_CSTAR);
+        guest_msr_rw(vmx, MSR_STAR);
+        guest_msr_rw(vmx, MSR_SF_MASK);
+        guest_msr_rw(vmx, MSR_KGSBASE);
+    }
+    return;
+}
+
+void
+vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
+{
+    uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+    /* Save host MSRs (if any) and restore guest MSRs */
+    wrmsr(MSR_LSTAR, guest_msrs[IDX_MSR_LSTAR]);
+    wrmsr(MSR_CSTAR, guest_msrs[IDX_MSR_CSTAR]);
+    wrmsr(MSR_STAR, guest_msrs[IDX_MSR_STAR]);
+    wrmsr(MSR_SF_MASK, guest_msrs[IDX_MSR_SF_MASK]);
+    wrmsr(MSR_KGSBASE, guest_msrs[IDX_MSR_KGSBASE]);
+}
+
+void
+vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
+{
+    uint64_t *guest_msrs = vmx->guest_msrs[vcpuid];
+
+    /* Save guest MSRs */
+    guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+    guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+    guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+    guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+    guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
+
+    /* Restore host MSRs */
+    wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+    wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+    wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+    wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+    /* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
+{
+    int error = 0;
+
+    switch (num) {
+    case MSR_IA32_MISC_ENABLE:
+        *val = misc_enable;
+        break;
+    default:
+        error = EINVAL;
+        break;
+    }
+
+    return (error);
+}
+
+int
+vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+{
+    int error = 0;
+
+    switch (num) {
+    default:
+        error = EINVAL;
+        break;
+    }
+
+    return (error);
+}
@@ -29,6 +29,15 @@
 #ifndef _VMX_MSR_H_
 #define _VMX_MSR_H_

+struct vmx;
+
+void vmx_msr_init(void);
+void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
+void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
+int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val, bool *retu);
+int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val, bool *retu);
+
 uint32_t vmx_revision(void);

 int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
@@ -52,4 +61,10 @@ int vmx_set_ctlreg(int ctl_reg, int true_ctl_reg, uint32_t ones_mask,
 void msr_bitmap_initialize(char *bitmap);
 int  msr_bitmap_change_access(char *bitmap, u_int msr, int access);

+#define guest_msr_rw(vmx, msr) \
+    msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
+
+#define guest_msr_ro(vmx, msr) \
+    msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
+
 #endif
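
The guest_msr_rw()/guest_msr_ro() macros above manipulate the 4KB VMX MSR
permission bitmap. The committed msr_bitmap_change_access() is not part of
this diff; the sketch below (hypothetical name msr_bitmap_allow, assuming
the bitmap layout documented in the Intel SDM) shows the idea:

    /*
     * Sketch only: the 4KB bitmap holds read bits for low MSRs
     * (0x0-0x1fff) at bytes 0-0x3ff, read bits for high MSRs
     * (0xc0000000-0xc0001fff) at 0x400-0x7ff, and the matching write
     * regions 2KB further in.  A clear bit allows the access without
     * causing a VM-exit.  EINVAL assumes <sys/errno.h>.
     */
    static int
    msr_bitmap_allow(char *bitmap, u_int msr, int read, int write)
    {
        int byte, bit;

        if (msr <= 0x00001fff)
            byte = msr / 8;                         /* low MSR region */
        else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
            byte = 1024 + (msr - 0xc0000000) / 8;   /* high MSR region */
        else
            return (EINVAL);
        bit = msr & 0x7;

        if (read)
            bitmap[byte] &= ~(1 << bit);            /* read bitmaps at 0 */
        if (write)
            bitmap[2048 + byte] &= ~(1 << bit);     /* write bitmaps at 2KB */
        return (0);
    }
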
@@ -74,7 +74,6 @@ __FBSDID("$FreeBSD$");
 #include "vhpet.h"
 #include "vioapic.h"
 #include "vlapic.h"
-#include "vmm_msr.h"
 #include "vmm_ipi.h"
 #include "vmm_stat.h"
 #include "vmm_lapic.h"
@@ -105,7 +104,6 @@ struct vcpu {
     struct savefpu *guestfpu;  /* (a,i) guest fpu state */
     uint64_t guest_xcr0;       /* (i) guest %xcr0 register */
     void *stats;               /* (a,i) statistics */
-    uint64_t guest_msrs[VMM_MSR_NUM]; /* (i) emulated MSRs */
     struct vm_exit exitinfo;   /* (x) exit reason and collateral */
 };
@ -188,7 +186,6 @@ static struct vmm_ops *ops;
|
||||
#define fpu_stop_emulating() clts()
|
||||
|
||||
static MALLOC_DEFINE(M_VM, "vm", "vm");
|
||||
CTASSERT(VMM_MSR_NUM <= 64); /* msr_mask can keep track of up to 64 msrs */
|
||||
|
||||
/* statistics */
|
||||
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
|
||||
@@ -249,7 +246,6 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
     vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
     fpu_save_area_reset(vcpu->guestfpu);
     vmm_stat_init(vcpu->stats);
-    guest_msrs_init(vm, vcpu_id);
 }

 struct vm_exit *
@@ -293,7 +289,6 @@ vmm_init(void)
     else
         return (ENXIO);

-    vmm_msr_init();
     vmm_resume_p = vmm_resume;

     return (VMM_INIT(vmm_ipinum));
|
||||
pcb = PCPU_GET(curpcb);
|
||||
set_pcb_flags(pcb, PCB_FULL_IRET);
|
||||
|
||||
restore_guest_msrs(vm, vcpuid);
|
||||
restore_guest_fpustate(vcpu);
|
||||
|
||||
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
|
||||
@@ -1448,7 +1442,6 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
     vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

     save_guest_fpustate(vcpu);
-    restore_host_msrs(vm, vcpuid);

     vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

@@ -1890,12 +1883,6 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
     return (VMSETCAP(vm->cookie, vcpu, type, val));
 }

-uint64_t *
-vm_guest_msrs(struct vm *vm, int cpu)
-{
-    return (vm->vcpu[cpu].guest_msrs);
-}
-
 struct vlapic *
 vm_lapic(struct vm *vm, int cpu)
 {
@@ -1,273 +0,0 @@
-/*-
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/smp.h>
-
-#include <machine/specialreg.h>
-
-#include <machine/vmm.h>
-#include "vmm_lapic.h"
-#include "vmm_msr.h"
-
-#define VMM_MSR_F_EMULATE  0x01
-#define VMM_MSR_F_READONLY 0x02
-#define VMM_MSR_F_INVALID  0x04 /* guest_msr_valid() can override this */
-
-struct vmm_msr {
-    int      num;
-    int      flags;
-    uint64_t hostval;
-};
-
-static struct vmm_msr vmm_msr[] = {
-    { MSR_LSTAR,    0 },
-    { MSR_CSTAR,    0 },
-    { MSR_STAR,     0 },
-    { MSR_SF_MASK,  0 },
-    { MSR_PAT,      VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
-    { MSR_BIOS_SIGN,VMM_MSR_F_EMULATE },
-    { MSR_MCG_CAP,  VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
-    { MSR_IA32_PLATFORM_ID, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
-    { MSR_IA32_MISC_ENABLE, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
-};
-
-#define vmm_msr_num (sizeof(vmm_msr) / sizeof(vmm_msr[0]))
-CTASSERT(VMM_MSR_NUM >= vmm_msr_num);
-
-#define readonly_msr(idx) \
-    ((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)
-
-#define emulated_msr(idx) \
-    ((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)
-
-#define invalid_msr(idx) \
-    ((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)
-
-void
-vmm_msr_init(void)
-{
-    int i;
-
-    for (i = 0; i < vmm_msr_num; i++) {
-        if (emulated_msr(i))
-            continue;
-        /*
-         * XXX this assumes that the value of the host msr does not
-         * change after we have cached it.
-         */
-        vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
-    }
-}
-
-void
-guest_msrs_init(struct vm *vm, int cpu)
-{
-    int i;
-    uint64_t *guest_msrs, misc;
-
-    guest_msrs = vm_guest_msrs(vm, cpu);
-
-    for (i = 0; i < vmm_msr_num; i++) {
-        switch (vmm_msr[i].num) {
-        case MSR_LSTAR:
-        case MSR_CSTAR:
-        case MSR_STAR:
-        case MSR_SF_MASK:
-        case MSR_BIOS_SIGN:
-        case MSR_MCG_CAP:
-            guest_msrs[i] = 0;
-            break;
-        case MSR_PAT:
-            guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
-                PAT_VALUE(1, PAT_WRITE_THROUGH) |
-                PAT_VALUE(2, PAT_UNCACHED)      |
-                PAT_VALUE(3, PAT_UNCACHEABLE)   |
-                PAT_VALUE(4, PAT_WRITE_BACK)    |
-                PAT_VALUE(5, PAT_WRITE_THROUGH) |
-                PAT_VALUE(6, PAT_UNCACHED)      |
-                PAT_VALUE(7, PAT_UNCACHEABLE);
-            break;
-        case MSR_IA32_MISC_ENABLE:
-            misc = rdmsr(MSR_IA32_MISC_ENABLE);
-            /*
-             * Set mandatory bits
-             *  11: branch trace disabled
-             *  12: PEBS unavailable
-             * Clear unsupported features
-             *  16: SpeedStep enable
-             *  18: enable MONITOR FSM
-             */
-            misc |= (1 << 12) | (1 << 11);
-            misc &= ~((1 << 18) | (1 << 16));
-            guest_msrs[i] = misc;
-            break;
-        case MSR_IA32_PLATFORM_ID:
-            guest_msrs[i] = 0;
-            break;
-        default:
-            panic("guest_msrs_init: missing initialization for msr "
-                  "0x%0x", vmm_msr[i].num);
-        }
-    }
-}
-
-static int
-msr_num_to_idx(u_int num)
-{
-    int i;
-
-    for (i = 0; i < vmm_msr_num; i++)
-        if (vmm_msr[i].num == num)
-            return (i);
-
-    return (-1);
-}
-
-int
-emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val, bool *retu)
-{
-    int idx;
-    uint64_t *guest_msrs;
-
-    if (lapic_msr(num))
-        return (lapic_wrmsr(vm, cpu, num, val, retu));
-
-    idx = msr_num_to_idx(num);
-    if (idx < 0 || invalid_msr(idx))
-        return (EINVAL);
-
-    if (!readonly_msr(idx)) {
-        guest_msrs = vm_guest_msrs(vm, cpu);
-
-        /* Stash the value */
-        guest_msrs[idx] = val;
-
-        /* Update processor state for non-emulated MSRs */
-        if (!emulated_msr(idx))
-            wrmsr(vmm_msr[idx].num, val);
-    }
-
-    return (0);
-}
-
-int
-emulate_rdmsr(struct vm *vm, int cpu, u_int num, bool *retu)
-{
-    int error, idx;
-    uint32_t eax, edx;
-    uint64_t result, *guest_msrs;
-
-    if (lapic_msr(num)) {
-        error = lapic_rdmsr(vm, cpu, num, &result, retu);
-        goto done;
-    }
-
-    idx = msr_num_to_idx(num);
-    if (idx < 0 || invalid_msr(idx)) {
-        error = EINVAL;
-        goto done;
-    }
-
-    guest_msrs = vm_guest_msrs(vm, cpu);
-    result = guest_msrs[idx];
-
-    /*
-     * If this is not an emulated msr register make sure that the processor
-     * state matches our cached state.
-     */
-    if (!emulated_msr(idx) && (rdmsr(num) != result)) {
-        panic("emulate_rdmsr: msr 0x%0x has inconsistent cached "
-              "(0x%016lx) and actual (0x%016lx) values", num,
-              result, rdmsr(num));
-    }
-
-    error = 0;
-
-done:
-    if (error == 0) {
-        eax = result;
-        edx = result >> 32;
-        error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
-        if (error)
-            panic("vm_set_register(rax) error %d", error);
-        error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
-        if (error)
-            panic("vm_set_register(rdx) error %d", error);
-    }
-    return (error);
-}
-
-void
-restore_guest_msrs(struct vm *vm, int cpu)
-{
-    int i;
-    uint64_t *guest_msrs;
-
-    guest_msrs = vm_guest_msrs(vm, cpu);
-
-    for (i = 0; i < vmm_msr_num; i++) {
-        if (emulated_msr(i))
-            continue;
-        else
-            wrmsr(vmm_msr[i].num, guest_msrs[i]);
-    }
-}
-
-void
-restore_host_msrs(struct vm *vm, int cpu)
-{
-    int i;
-
-    for (i = 0; i < vmm_msr_num; i++) {
-        if (emulated_msr(i))
-            continue;
-        else
-            wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
-    }
-}
-
-/*
- * Must be called by the CPU-specific code before any guests are
- * created
- */
-void
-guest_msr_valid(int msr)
-{
-    int i;
-
-    for (i = 0; i < vmm_msr_num; i++) {
-        if (vmm_msr[i].num == msr && invalid_msr(i)) {
-            vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
-        }
-    }
-}
@@ -1,44 +0,0 @@
-/*-
- * Copyright (c) 2011 NetApp, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _VMM_MSR_H_
-#define _VMM_MSR_H_
-
-#define VMM_MSR_NUM 16
-struct vm;
-
-void vmm_msr_init(void);
-int emulate_wrmsr(struct vm *vm, int vcpu, u_int msr, uint64_t val,
-                  bool *retu);
-int emulate_rdmsr(struct vm *vm, int vcpu, u_int msr, bool *retu);
-void guest_msrs_init(struct vm *vm, int cpu);
-void guest_msr_valid(int msr);
-void restore_host_msrs(struct vm *vm, int cpu);
-void restore_guest_msrs(struct vm *vm, int cpu);
-
-#endif
@@ -19,7 +19,6 @@ SRCS+=  vmm.c \
     vmm_ipi.c   \
     vmm_lapic.c \
     vmm_mem.c   \
-    vmm_msr.c   \
     vmm_stat.c  \
     vmm_util.c  \
     x86.c       \
@@ -437,6 +437,10 @@
 #define MSR_MC4_STATUS          0x411
 #define MSR_MC4_ADDR            0x412
 #define MSR_MC4_MISC            0x413
+#define MSR_PKG_ENERGY_STATUS   0x611
+#define MSR_DRAM_ENERGY_STATUS  0x619
+#define MSR_PP0_ENERGY_STATUS   0x639
+#define MSR_PP1_ENERGY_STATUS   0x641

 /*
  * VMX MSRs
@@ -803,6 +803,12 @@ main(int argc, char *argv[])
         exit(1);
     }

+    error = init_msr();
+    if (error) {
+        fprintf(stderr, "init_msr error %d", error);
+        exit(1);
+    }
+
     init_mem();
     init_inout();
     pci_irq_init(ctx);
@@ -31,33 +31,84 @@ __FBSDID("$FreeBSD$");

 #include <sys/types.h>

+#include <machine/cpufunc.h>
 #include <machine/vmm.h>
+#include <machine/specialreg.h>

 #include <vmmapi.h>

+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
 #include "xmsr.h"

+static int cpu_vendor_intel, cpu_vendor_amd;
+
 int
 emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val)
 {

-    switch (code) {
-    case 0xd04:     /* Sandy Bridge uncore PMC MSRs */
-    case 0xc24:
-        return (0);
-    case 0x79:
-        return (0); /* IA32_BIOS_UPDT_TRIG MSR */
-    default:
-        break;
+    if (cpu_vendor_intel) {
+        switch (code) {
+        case 0xd04:     /* Sandy Bridge uncore PMCs */
+        case 0xc24:
+            return (0);
+        case MSR_BIOS_UPDT_TRIG:
+            return (0);
+        case MSR_BIOS_SIGN:
+            return (0);
+        default:
+            break;
+        }
     }
     return (-1);
 }

 int
-emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t *val)
+emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t *val)
 {
+    int error = 0;

-    return (-1);
+    if (cpu_vendor_intel) {
+        switch (num) {
+        case MSR_BIOS_SIGN:
+        case MSR_IA32_PLATFORM_ID:
+        case MSR_PKG_ENERGY_STATUS:
+        case MSR_PP0_ENERGY_STATUS:
+        case MSR_PP1_ENERGY_STATUS:
+        case MSR_DRAM_ENERGY_STATUS:
+            *val = 0;
+            break;
+        default:
+            error = -1;
+            break;
+        }
+    }
+    return (error);
 }
+
+int
+init_msr(void)
+{
+    int error;
+    u_int regs[4];
+    char cpu_vendor[13];
+
+    do_cpuid(0, regs);
+    ((u_int *)&cpu_vendor)[0] = regs[1];
+    ((u_int *)&cpu_vendor)[1] = regs[3];
+    ((u_int *)&cpu_vendor)[2] = regs[2];
+    cpu_vendor[12] = '\0';
+
+    error = 0;
+    if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+        cpu_vendor_amd = 1;
+    } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+        cpu_vendor_intel = 1;
+    } else {
+        fprintf(stderr, "Unknown cpu vendor \"%s\"\n", cpu_vendor);
+        error = -1;
+    }
+    return (error);
+}
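
The vendor probe in init_msr() above relies on CPUID leaf 0 returning the
12-byte vendor string split across EBX, EDX and ECX, in that order. A
standalone sketch of the same decoding (using memcpy rather than the pointer
casts; vendor[] is a local name):

    u_int regs[4];
    char vendor[13];

    /* do_cpuid() from <machine/cpufunc.h> fills regs[] as {eax, ebx, ecx, edx}. */
    do_cpuid(0, regs);
    memcpy(&vendor[0], &regs[1], 4);  /* EBX: "Genu" or "Auth" */
    memcpy(&vendor[4], &regs[3], 4);  /* EDX: "ineI" or "enti" */
    memcpy(&vendor[8], &regs[2], 4);  /* ECX: "ntel" or "cAMD" */
    vendor[12] = '\0';                /* "GenuineIntel" / "AuthenticAMD" */
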
@@ -29,6 +29,7 @@
 #ifndef _XMSR_H_
 #define _XMSR_H_

+int init_msr(void);
 int emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val);
 int emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t *val);
