mirror of https://git.FreeBSD.org/src.git (synced 2024-12-01 08:27:59 +00:00)
vmm: Refactor storage of CPU-dependent per-vCPU data.
Rather than storing static arrays of per-vCPU data in the CPU-specific
per-VM structure, adopt a more dynamic model similar to that used to
manage CPU-specific per-VM data. That is, add new vmmops methods to
init and cleanup a single vCPU. The init method returns a pointer that
is stored in 'struct vcpu' as a cookie pointer. This cookie pointer is
now passed to other vmmops callbacks in place of the integer index.
The index is now only used in KTR traces and when calling back into
the CPU-independent layer.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37151
parent 73abae4493
commit 1aa5150479
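In outline, the refactoring pairs a new per-vCPU init hook with a matching
cleanup hook and threads the returned pointer through every later per-vCPU
callback. A minimal sketch of that lifecycle, using only names that appear
in the diffs below (example_vcpu_lifecycle itself is a hypothetical wrapper,
not code from the commit):

	/* Kernel-context sketch; assumes the vmm.c definitions below. */
	static void
	example_vcpu_lifecycle(struct vm *vm, struct vcpu *vcpu, int vcpu_id)
	{
		/* The backend allocates its per-vCPU state and returns it. */
		vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);

		/* Later callbacks take the opaque cookie, not an index. */
		vmmops_getreg(vm->cookie, vcpu->cookie, VM_REG_GUEST_RIP,
		    &vcpu->nextrip);

		/* Teardown mirrors init. */
		vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
		vcpu->cookie = NULL;
	}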
@@ -167,27 +167,29 @@ typedef int (*vmm_init_func_t)(int ipinum);
 typedef int (*vmm_cleanup_func_t)(void);
 typedef void (*vmm_resume_func_t)(void);
 typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
+typedef int (*vmi_run_func_t)(void *vmi, void *vcpui, register_t rip,
     struct pmap *pmap, struct vm_eventinfo *info);
 typedef void (*vmi_cleanup_func_t)(void *vmi);
-typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
+typedef void * (*vmi_vcpu_init_func_t)(void *vmi, int vcpu_id);
+typedef void (*vmi_vcpu_cleanup_func_t)(void *vmi, void *vcpui);
+typedef int (*vmi_get_register_t)(void *vmi, void *vcpui, int num,
     uint64_t *retval);
-typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_set_register_t)(void *vmi, void *vcpui, int num,
     uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_get_desc_t)(void *vmi, void *vcpui, int num,
     struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_set_desc_t)(void *vmi, void *vcpui, int num,
     struct seg_desc *desc);
-typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
-typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
+typedef int (*vmi_get_cap_t)(void *vmi, void *vcpui, int num, int *retval);
+typedef int (*vmi_set_cap_t)(void *vmi, void *vcpui, int num, int val);
 typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
 typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
+typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, void *vcpui);
 typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
 typedef int (*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
 typedef int (*vmi_snapshot_vcpu_t)(void *vmi, struct vm_snapshot_meta *meta,
-    int vcpu);
-typedef int (*vmi_restore_tsc_t)(void *vmi, int vcpuid, uint64_t now);
+    void *vcpui);
+typedef int (*vmi_restore_tsc_t)(void *vmi, void *vcpui, uint64_t now);
 
 struct vmm_ops {
 	vmm_init_func_t	modinit;	/* module wide initialization */

@@ -197,6 +199,8 @@ struct vmm_ops {
 	vmi_init_func_t	init;		/* vm-specific initialization */
 	vmi_run_func_t	run;
 	vmi_cleanup_func_t	cleanup;
+	vmi_vcpu_init_func_t	vcpu_init;
+	vmi_vcpu_cleanup_func_t	vcpu_cleanup;
 	vmi_get_register_t	getreg;
 	vmi_set_register_t	setreg;
 	vmi_get_desc_t	getdesc;
[File diff suppressed because it is too large]
@@ -33,6 +33,7 @@
 
 struct pcpu;
 struct svm_softc;
+struct svm_vcpu;
 
 /*
  * Guest register state that is saved outside the VMCB.

@@ -68,7 +69,8 @@ struct svm_regctx {
 
 void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
 #ifdef BHYVE_SNAPSHOT
-int svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset);
+int svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu,
+    uint64_t offset);
 #endif
 
 #endif /* _SVM_H_ */
@@ -72,7 +72,7 @@ svm_msr_init(void)
 }
 
 void
-svm_msr_guest_init(struct svm_softc *sc, int vcpu)
+svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
 {
 	/*
 	 * All the MSRs accessible to the guest are either saved/restored by

@@ -86,7 +86,7 @@ svm_msr_guest_init(struct svm_softc *sc, int vcpu)
 }
 
 void
-svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
+svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
 {
 	/*
 	 * Save host MSRs (if any) and restore guest MSRs (if any).

@@ -94,7 +94,7 @@ svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
 }
 
 void
-svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
+svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
 {
 	/*
 	 * Save guest MSRs (if any) and restore host MSRs.

@@ -108,8 +108,8 @@ svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
 }
 
 int
-svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
-    bool *retu)
+svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+    uint64_t *result, bool *retu)
 {
 	int error = 0;
 

@@ -124,8 +124,8 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_rdmtrr(&sc->vcpu[vcpu].mtrr, num, result) != 0) {
-			vm_inject_gp(sc->vm, vcpu);
+		if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
+			vm_inject_gp(sc->vm, vcpu->vcpuid);
 		}
 		break;
 	case MSR_SYSCFG:

@@ -142,7 +142,8 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
 }
 
 int
-svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
+svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
+    bool *retu)
 {
 	int error = 0;
 

@@ -156,8 +157,8 @@ svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_wrmtrr(&sc->vcpu[vcpu].mtrr, num, val) != 0) {
-			vm_inject_gp(sc->vm, vcpu);
+		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
+			vm_inject_gp(sc->vm, vcpu->vcpuid);
 		}
 		break;
 	case MSR_SYSCFG:
@@ -32,15 +32,16 @@
 #define _SVM_MSR_H_
 
 struct svm_softc;
+struct svm_vcpu;
 
 void svm_msr_init(void);
-void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
-void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
-void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu);
 
-int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
-    bool *retu);
-int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
-    bool *retu);
+int svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+    uint64_t val, bool *retu);
+int svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+    uint64_t *result, bool *retu);
 
 #endif /* _SVM_MSR_H_ */
@@ -51,62 +51,52 @@ struct svm_vcpu {
 	long		eptgen;		/* pmap->pm_eptgen when the vcpu last ran */
 	struct asid	asid;
 	struct vm_mtrr	mtrr;
+	int		vcpuid;
 };
 
 /*
  * SVM softc, one per virtual machine.
  */
 struct svm_softc {
-	struct svm_vcpu	vcpu[VM_MAXCPU];
-	vm_offset_t	nptp;		/* nested page table */
+	vm_paddr_t	nptp;		/* nested page table */
 	uint8_t		*iopm_bitmap;	/* shared by all vcpus */
 	uint8_t		*msr_bitmap;	/* shared by all vcpus */
 	struct vm	*vm;
 };
 
-static __inline struct svm_vcpu *
-svm_get_vcpu(struct svm_softc *sc, int vcpu)
-{
-
-	return (&(sc->vcpu[vcpu]));
-}
-
 static __inline struct vmcb *
-svm_get_vmcb(struct svm_softc *sc, int vcpu)
+svm_get_vmcb(struct svm_vcpu *vcpu)
 {
 
-	return ((sc->vcpu[vcpu].vmcb));
+	return (vcpu->vmcb);
 }
 
 static __inline struct vmcb_state *
-svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
+svm_get_vmcb_state(struct svm_vcpu *vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].vmcb->state));
+	return (&vcpu->vmcb->state);
 }
 
 static __inline struct vmcb_ctrl *
-svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
+svm_get_vmcb_ctrl(struct svm_vcpu *vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].vmcb->ctrl));
+	return (&vcpu->vmcb->ctrl);
 }
 
 static __inline struct svm_regctx *
-svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
+svm_get_guest_regctx(struct svm_vcpu *vcpu)
 {
 
-	return (&(sc->vcpu[vcpu].swctx));
+	return (&vcpu->swctx);
 }
 
 static __inline void
-svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
+svm_set_dirty(struct svm_vcpu *vcpu, uint32_t dirtybits)
 {
-	struct svm_vcpu *vcpustate;
-
-	vcpustate = svm_get_vcpu(sc, vcpu);
-
-	vcpustate->dirty |= dirtybits;
+	vcpu->dirty |= dirtybits;
 }
 
 #endif /* _SVM_SOFTC_H_ */
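The header change above drives most of the churn in the SVM files that
follow: helpers that used to take a (softc, index) pair and look up
sc->vcpu[] now take the struct svm_vcpu pointer directly. Roughly, a call
site changes like this (example_set_cr2 is a hypothetical illustration
built from the accessors above and the vmcb.c hunks below):

	/* Before: the (softc, index) pair identified the vCPU. */
	static void
	example_set_cr2_old(struct svm_softc *sc, int vcpu, uint64_t val)
	{
		struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu);

		state->cr2 = val;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
	}

	/* After: the per-vCPU pointer is passed straight through. */
	static void
	example_set_cr2_new(struct svm_vcpu *vcpu, uint64_t val)
	{
		struct vmcb_state *state = svm_get_vmcb_state(vcpu);

		state->cr2 = val;
		svm_set_dirty(vcpu, VMCB_CACHE_CR2);
	}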
@@ -116,14 +116,14 @@ vmcb_segptr(struct vmcb *vmcb, int type)
 }
 
 static int
-vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
-    uint64_t *val)
+vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
+    int ident, uint64_t *val)
 {
 	struct vmcb *vmcb;
 	int off, bytes;
 	char *ptr;
 
-	vmcb = svm_get_vmcb(softc, vcpu);
+	vmcb = svm_get_vmcb(vcpu);
 	off = VMCB_ACCESS_OFFSET(ident);
 	bytes = VMCB_ACCESS_BYTES(ident);

@@ -146,14 +146,14 @@ vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
 		memcpy(val, ptr + off, bytes);
 		break;
 	default:
-		VCPU_CTR1(softc->vm, vcpu,
+		VCPU_CTR1(softc->vm, vcpu->vcpuid,
 		    "Invalid size %d for VMCB access: %d", bytes);
 		return (EINVAL);
 	}
 
 	/* Invalidate all VMCB state cached by h/w. */
 	if (write)
-		svm_set_dirty(softc, vcpu, 0xffffffff);
+		svm_set_dirty(vcpu, 0xffffffff);
 
 	return (0);
 }

@@ -162,14 +162,15 @@
  * Read from segment selector, control and general purpose register of VMCB.
  */
 int
-vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
+vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t *retval)
 {
 	struct vmcb *vmcb;
 	struct vmcb_state *state;
 	struct vmcb_segment *seg;
 	int err;
 
-	vmcb = svm_get_vmcb(sc, vcpu);
+	vmcb = svm_get_vmcb(vcpu);
 	state = &vmcb->state;
 	err = 0;
 

@@ -252,14 +253,14 @@
  * Write to segment selector, control and general purpose register of VMCB.
  */
 int
-vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
+vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident, uint64_t val)
 {
 	struct vmcb *vmcb;
 	struct vmcb_state *state;
 	struct vmcb_segment *seg;
 	int err, dirtyseg;
 
-	vmcb = svm_get_vmcb(sc, vcpu);
+	vmcb = svm_get_vmcb(vcpu);
 	state = &vmcb->state;
 	dirtyseg = 0;
 	err = 0;

@@ -270,38 +271,38 @@ vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
 	switch (ident) {
 	case VM_REG_GUEST_CR0:
 		state->cr0 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		svm_set_dirty(vcpu, VMCB_CACHE_CR);
 		break;
 
 	case VM_REG_GUEST_CR2:
 		state->cr2 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
+		svm_set_dirty(vcpu, VMCB_CACHE_CR2);
 		break;
 
 	case VM_REG_GUEST_CR3:
 		state->cr3 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		svm_set_dirty(vcpu, VMCB_CACHE_CR);
 		break;
 
 	case VM_REG_GUEST_CR4:
 		state->cr4 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		svm_set_dirty(vcpu, VMCB_CACHE_CR);
 		break;
 
 	case VM_REG_GUEST_DR6:
 		state->dr6 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
+		svm_set_dirty(vcpu, VMCB_CACHE_DR);
 		break;
 
 	case VM_REG_GUEST_DR7:
 		state->dr7 = val;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
+		svm_set_dirty(vcpu, VMCB_CACHE_DR);
 		break;
 
 	case VM_REG_GUEST_EFER:
 		/* EFER_SVM must always be set when the guest is executing */
 		state->efer = val | EFER_SVM;
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+		svm_set_dirty(vcpu, VMCB_CACHE_CR);
 		break;
 
 	case VM_REG_GUEST_RAX:

@@ -334,7 +335,7 @@ vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
 		    __func__, ident));
 		seg->selector = val;
 		if (dirtyseg)
-			svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+			svm_set_dirty(vcpu, VMCB_CACHE_SEG);
 		break;
 
 	case VM_REG_GUEST_GDTR:

@@ -365,15 +366,14 @@ vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
 }
 
 int
-vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+    struct seg_desc *desc)
 {
 	struct vmcb *vmcb;
-	struct svm_softc *sc;
 	struct vmcb_segment *seg;
 	uint16_t attrib;
 
-	sc = arg;
-	vmcb = svm_get_vmcb(sc, vcpu);
+	vmcb = svm_get_vmcb(vcpu);
 
 	seg = vmcb_segptr(vmcb, reg);
 	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",

@@ -395,7 +395,7 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
 		seg->attrib = attrib;
 	}
 
-	VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
+	VCPU_CTR4(sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), limit (%#x), "
 	    "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
 
 	switch (reg) {

@@ -403,11 +403,11 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
 	case VM_REG_GUEST_DS:
 	case VM_REG_GUEST_ES:
 	case VM_REG_GUEST_SS:
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+		svm_set_dirty(vcpu, VMCB_CACHE_SEG);
 		break;
 	case VM_REG_GUEST_GDTR:
 	case VM_REG_GUEST_IDTR:
-		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
+		svm_set_dirty(vcpu, VMCB_CACHE_DT);
 		break;
 	default:
 		break;

@@ -417,14 +417,13 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
 }
 
 int
-vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+    struct seg_desc *desc)
 {
 	struct vmcb *vmcb;
-	struct svm_softc *sc;
 	struct vmcb_segment *seg;
 
-	sc = arg;
-	vmcb = svm_get_vmcb(sc, vcpu);
+	vmcb = svm_get_vmcb(vcpu);
 	seg = vmcb_segptr(vmcb, reg);
 	KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
 	    __func__, reg));

@@ -459,15 +458,11 @@ vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
 
 #ifdef BHYVE_SNAPSHOT
 int
-vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val)
+vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t *val)
 {
 	int error = 0;
 
-	if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm)) {
-		error = EINVAL;
-		goto err;
-	}
-
 	if (ident >= VM_REG_LAST) {
 		error = EINVAL;
 		goto err;

@@ -480,15 +475,11 @@ vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val)
 }
 
 int
-vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
+vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t val)
 {
 	int error = 0;
 
-	if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm)) {
-		error = EINVAL;
-		goto err;
-	}
-
 	if (ident >= VM_REG_LAST) {
 		error = EINVAL;
 		goto err;

@@ -501,13 +492,14 @@ vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
 }
 
 int
-vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
+vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+    struct vm_snapshot_meta *meta)
 {
 	int ret;
 	struct seg_desc desc;
 
 	if (meta->op == VM_SNAPSHOT_SAVE) {
-		ret = vmcb_getdesc(arg, vcpu, reg, &desc);
+		ret = vmcb_getdesc(sc, vcpu, reg, &desc);
 		if (ret != 0)
 			goto done;
 

@@ -519,7 +511,7 @@ vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
 		SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
 		SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
 
-		ret = vmcb_setdesc(arg, vcpu, reg, &desc);
+		ret = vmcb_setdesc(sc, vcpu, reg, &desc);
 		if (ret != 0)
 			goto done;
 	} else {

@@ -532,7 +524,7 @@ vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
 }
 
 int
-vmcb_snapshot_any(struct svm_softc *sc, int vcpu, int ident,
+vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
     struct vm_snapshot_meta *meta)
 {
 	int ret;
@@ -234,6 +234,7 @@
 #ifdef _KERNEL
 
 struct svm_softc;
+struct svm_vcpu;
 struct vm_snapshot_meta;
 
 /* VMCB save state area segment format */

@@ -353,17 +354,23 @@ struct vmcb {
 CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
 CTASSERT(offsetof(struct vmcb, state) == 0x400);
 
-int vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval);
-int vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
-int vmcb_setdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
-int vmcb_getdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
+int vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t *retval);
+int vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t val);
+int vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    struct seg_desc *desc);
+int vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    struct seg_desc *desc);
 int vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
 #ifdef BHYVE_SNAPSHOT
-int vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val);
-int vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
-int vmcb_snapshot_desc(void *arg, int vcpu, int reg,
+int vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t *val);
+int vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+    uint64_t val);
+int vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
     struct vm_snapshot_meta *meta);
-int vmcb_snapshot_any(struct svm_softc *sc, int vcpu, int ident,
+int vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
     struct vm_snapshot_meta *meta);
 #endif
[File diff suppressed because it is too large]
@@ -31,6 +31,9 @@
 #ifndef _VMX_H_
 #define _VMX_H_
 
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
 #include "vmcs.h"
 #include "x86.h"
 

@@ -131,15 +134,17 @@ struct vmx_vcpu {
 	struct vmxcap	cap;
 	struct vmxstate	state;
 	struct vm_mtrr	mtrr;
+	int		vcpuid;
 };
 
 /* virtual machine softc */
 struct vmx {
 	struct vmx_vcpu	vcpus[VM_MAXCPU];
-	struct vm	*vm;
 	char		*msr_bitmap;
 	uint64_t	eptp;
+	struct vm	*vm;
 	long		eptgen[MAXCPU];	/* cached pmap->pm_eptgen */
 	pmap_t		pmap;
 	uint16_t	vpids[VM_MAXCPU];
 };
 
 extern bool vmx_have_msr_tsc_aux;

@@ -153,7 +158,8 @@ void vmx_call_isr(uintptr_t entry);
 u_long vmx_fix_cr0(u_long cr0);
 u_long vmx_fix_cr4(u_long cr4);
 
-int vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
+int vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu,
+    uint64_t offset);
 
 extern char vmx_exit_guest[];
 extern char vmx_exit_guest_flush_rsb[];
@@ -314,15 +314,13 @@ vmx_msr_init(void)
 }
 
 void
-vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-
 	/*
 	 * The permissions bitmap is shared between all vcpus so initialize it
 	 * once when initializing the vBSP.
 	 */
-	if (vcpuid == 0) {
+	if (vcpu->vcpuid == 0) {
 		guest_msr_rw(vmx, MSR_LSTAR);
 		guest_msr_rw(vmx, MSR_CSTAR);
 		guest_msr_rw(vmx, MSR_STAR);

@@ -333,7 +331,7 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
 	/*
 	 * Initialize guest IA32_PAT MSR with default value after reset.
 	 */
-	vmx_vcpu->guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
+	vcpu->guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
 	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
 	    PAT_VALUE(2, PAT_UNCACHED) |
 	    PAT_VALUE(3, PAT_UNCACHEABLE) |

@@ -346,24 +344,22 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
 }
 
 void
-vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-
 	/* Save host MSRs (in particular, KGSBASE) and restore guest MSRs */
 	update_pcb_bases(curpcb);
-	wrmsr(MSR_LSTAR, vmx_vcpu->guest_msrs[IDX_MSR_LSTAR]);
-	wrmsr(MSR_CSTAR, vmx_vcpu->guest_msrs[IDX_MSR_CSTAR]);
-	wrmsr(MSR_STAR, vmx_vcpu->guest_msrs[IDX_MSR_STAR]);
-	wrmsr(MSR_SF_MASK, vmx_vcpu->guest_msrs[IDX_MSR_SF_MASK]);
-	wrmsr(MSR_KGSBASE, vmx_vcpu->guest_msrs[IDX_MSR_KGSBASE]);
+	wrmsr(MSR_LSTAR, vcpu->guest_msrs[IDX_MSR_LSTAR]);
+	wrmsr(MSR_CSTAR, vcpu->guest_msrs[IDX_MSR_CSTAR]);
+	wrmsr(MSR_STAR, vcpu->guest_msrs[IDX_MSR_STAR]);
+	wrmsr(MSR_SF_MASK, vcpu->guest_msrs[IDX_MSR_SF_MASK]);
+	wrmsr(MSR_KGSBASE, vcpu->guest_msrs[IDX_MSR_KGSBASE]);
 }
 
 void
-vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-	uint64_t guest_tsc_aux = vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX];
+	uint64_t guest_tsc_aux = vcpu->guest_msrs[IDX_MSR_TSC_AUX];
 	uint32_t host_aux = cpu_auxmsr();
 
 	if (vmx_have_msr_tsc_aux && guest_tsc_aux != host_aux)

@@ -371,16 +367,15 @@ vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid)
 }
 
 void
-vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-
 	/* Save guest MSRs */
-	vmx_vcpu->guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
-	vmx_vcpu->guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
-	vmx_vcpu->guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
-	vmx_vcpu->guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
-	vmx_vcpu->guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
+	vcpu->guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+	vcpu->guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+	vcpu->guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+	vcpu->guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+	vcpu->guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
 
 	/* Restore host MSRs */
 	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);

@@ -392,16 +387,15 @@ vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
 }
 
 void
-vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-	uint64_t guest_tsc_aux = vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX];
+	uint64_t guest_tsc_aux = vcpu->guest_msrs[IDX_MSR_TSC_AUX];
 	uint32_t host_aux = cpu_auxmsr();
 
 	if (vmx_have_msr_tsc_aux && guest_tsc_aux != host_aux)
 		/*
 		 * Note that it is not necessary to save the guest value
-		 * here; vmx->guest_msrs[vcpuid][IDX_MSR_TSC_AUX] always
+		 * here; vcpu->guest_msrs[IDX_MSR_TSC_AUX] always
 		 * contains the current value since it is updated whenever
 		 * the guest writes to it (which is expected to be very
 		 * rare).

@@ -410,9 +404,9 @@ vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid)
 }
 
 int
-vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
+vmx_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
+    bool *retu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
 	int error;
 
 	error = 0;

@@ -428,8 +422,8 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_rdmtrr(&vmx_vcpu->mtrr, num, val) != 0) {
-			vm_inject_gp(vmx->vm, vcpuid);
+		if (vm_rdmtrr(&vcpu->mtrr, num, val) != 0) {
+			vm_inject_gp(vmx->vm, vcpu->vcpuid);
 		}
 		break;
 	case MSR_IA32_MISC_ENABLE:

@@ -443,7 +437,7 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
 		*val = turbo_ratio_limit;
 		break;
 	case MSR_PAT:
-		*val = vmx_vcpu->guest_msrs[IDX_MSR_PAT];
+		*val = vcpu->guest_msrs[IDX_MSR_PAT];
 		break;
 	default:
 		error = EINVAL;

@@ -453,9 +447,9 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
 }
 
 int
-vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
+    bool *retu)
 {
-	struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
 	uint64_t changed;
 	int error;
 

@@ -471,8 +465,8 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
 	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
 	case MSR_MTRR64kBase:
 	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
-		if (vm_wrmtrr(&vmx_vcpu->mtrr, num, val) != 0) {
-			vm_inject_gp(vmx->vm, vcpuid);
+		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
+			vm_inject_gp(vmx->vm, vcpu->vcpuid);
 		}
 		break;
 	case MSR_IA32_MISC_ENABLE:

@@ -497,12 +491,12 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
 		break;
 	case MSR_PAT:
 		if (pat_valid(val))
-			vmx_vcpu->guest_msrs[IDX_MSR_PAT] = val;
+			vcpu->guest_msrs[IDX_MSR_PAT] = val;
 		else
-			vm_inject_gp(vmx->vm, vcpuid);
+			vm_inject_gp(vmx->vm, vcpu->vcpuid);
 		break;
 	case MSR_TSC:
-		error = vmx_set_tsc_offset(vmx, vcpuid, val - rdtsc());
+		error = vmx_set_tsc_offset(vmx, vcpu, val - rdtsc());
 		break;
 	case MSR_TSC_AUX:
 		if (vmx_have_msr_tsc_aux)

@@ -511,9 +505,9 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
 			 * value when it is called immediately before guest
 			 * entry.
 			 */
-			vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
+			vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
 		else
-			vm_inject_gp(vmx->vm, vcpuid);
+			vm_inject_gp(vmx->vm, vcpu->vcpuid);
 		break;
 	default:
 		error = EINVAL;
@@ -34,13 +34,15 @@
 struct vmx;
 
 void vmx_msr_init(void);
-void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid);
-int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val, bool *retu);
-int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val, bool *retu);
+void vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
+int vmx_rdmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
+    bool *retu);
+int vmx_wrmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
+    bool *retu);
 
 uint32_t vmx_revision(void);
@@ -106,6 +106,7 @@ struct vcpu {
 	enum vcpu_state	state;		/* (o) vcpu state */
 	int		hostcpu;	/* (o) vcpu's host cpu */
 	int		reqidle;	/* (i) request vcpu to idle */
+	void		*cookie;	/* (i) cpu-specific data */
 	struct vlapic	*vlapic;	/* (i) APIC device model */
 	enum x2apic_state x2apic_state;	/* (i) APIC mode */
 	uint64_t	exitintinfo;	/* (i) events pending at VM exit */

@@ -208,30 +209,32 @@ DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
 DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
 DEFINE_VMMOPS_IFUNC(void, modresume, (void))
 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, int vcpu, register_t rip,
+DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, void *vcpui, register_t rip,
    struct pmap *pmap, struct vm_eventinfo *info))
 DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, int vcpu_id))
+DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vmi, void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, void *vcpui, int num,
    uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, void *vcpui, int num,
    uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, void *vcpui, int num,
    struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, void *vcpui, int num,
    struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, int vcpu, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, int vcpu, int num, int val))
+DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, void *vcpui, int num, int *retval))
+DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, void *vcpui, int num, int val))
 DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
    vm_offset_t max))
 DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, int vcpu))
+DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, void *vcpui))
 DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (void *vmi, struct vlapic *vlapic))
 #ifdef BHYVE_SNAPSHOT
 DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta
    *meta))
 DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vmi, struct vm_snapshot_meta
-    *meta, int vcpu))
-DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, int vcpuid, uint64_t now))
+    *meta, void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, void *vcpui, uint64_t now))
 #endif
 
 #define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)

@@ -293,12 +296,20 @@ vcpu_state2str(enum vcpu_state state)
 }
 #endif
 
+static __inline void *
+vcpu_cookie(struct vm *vm, int i)
+{
+	return (vm->vcpu[i].cookie);
+}
+
 static void
 vcpu_cleanup(struct vm *vm, int i, bool destroy)
 {
 	struct vcpu *vcpu = &vm->vcpu[i];
 
 	vmmops_vlapic_cleanup(vm->cookie, vcpu->vlapic);
+	vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
+	vcpu->cookie = NULL;
 	if (destroy) {
 		vmm_stat_free(vcpu->stats);
 		fpu_save_area_free(vcpu->guestfpu);

@@ -326,7 +337,8 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
 		vcpu->tsc_offset = 0;
 	}
 
-	vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu_id);
+	vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);
+	vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu->cookie);
 	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
 	vcpu->reqidle = 0;
 	vcpu->exitintinfo = 0;

@@ -1070,7 +1082,8 @@ vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
 	if (reg >= VM_REG_LAST)
 		return (EINVAL);
 
-	return (vmmops_getreg(vm->cookie, vcpu, reg, retval));
+	return (vmmops_getreg(vm->cookie, vcpu_cookie(vm, vcpu), reg,
+	    retval));
 }
 
 int

@@ -1085,13 +1098,13 @@ vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
 	if (reg >= VM_REG_LAST)
 		return (EINVAL);
 
-	error = vmmops_setreg(vm->cookie, vcpuid, reg, val);
+	vcpu = &vm->vcpu[vcpuid];
+	error = vmmops_setreg(vm->cookie, vcpu->cookie, reg, val);
 	if (error || reg != VM_REG_GUEST_RIP)
 		return (error);
 
 	/* Set 'nextrip' to match the value of %rip */
 	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
-	vcpu = &vm->vcpu[vcpuid];
 	vcpu->nextrip = val;
 	return (0);
 }

@@ -1139,7 +1152,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
 		return (EINVAL);
 
-	return (vmmops_getdesc(vm->cookie, vcpu, reg, desc));
+	return (vmmops_getdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
 }
 
 int

@@ -1152,7 +1165,7 @@ vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 	if (!is_segment_register(reg) && !is_descriptor_table(reg))
 		return (EINVAL);
 
-	return (vmmops_setdesc(vm->cookie, vcpu, reg, desc));
+	return (vmmops_setdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
 }
 
 static void

@@ -1772,7 +1785,8 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
 	restore_guest_fpustate(vcpu);
 
 	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
-	error = vmmops_run(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
+	error = vmmops_run(vm->cookie, vcpu->cookie, vcpu->nextrip, pmap,
+	    &evinfo);
 	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
 
 	save_guest_fpustate(vcpu);

@@ -2278,7 +2292,7 @@ vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
 	if (type < 0 || type >= VM_CAP_MAX)
 		return (EINVAL);
 
-	return (vmmops_getcap(vm->cookie, vcpu, type, retval));
+	return (vmmops_getcap(vm->cookie, vcpu_cookie(vm, vcpu), type, retval));
 }
 
 int

@@ -2290,7 +2304,7 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
 	if (type < 0 || type >= VM_CAP_MAX)
 		return (EINVAL);
 
-	return (vmmops_setcap(vm->cookie, vcpu, type, val));
+	return (vmmops_setcap(vm->cookie, vcpu_cookie(vm, vcpu), type, val));
 }
 
 struct vlapic *

@@ -2851,16 +2865,19 @@ vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta)
 }
 
 static int
-vm_snapshot_vmcx(struct vm *vm, struct vm_snapshot_meta *meta)
+vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
 {
 	int error;
+	struct vcpu *vcpu;
 	uint16_t i, maxcpus;
 
 	error = 0;
 
 	maxcpus = vm_get_maxcpus(vm);
 	for (i = 0; i < maxcpus; i++) {
-		error = vmmops_vcpu_snapshot(vm->cookie, meta, i);
+		vcpu = &vm->vcpu[i];
+
+		error = vmmops_vcpu_snapshot(vm->cookie, meta, vcpu->cookie);
 		if (error != 0) {
 			printf("%s: failed to snapshot vmcs/vmcb data for "
 			    "vCPU: %d; error: %d\n", __func__, i, error);

@@ -2885,7 +2902,7 @@ vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
 		ret = vmmops_snapshot(vm->cookie, meta);
 		break;
 	case STRUCT_VMCX:
-		ret = vm_snapshot_vmcx(vm, meta);
+		ret = vm_snapshot_vcpu(vm, meta);
 		break;
 	case STRUCT_VM:
 		ret = vm_snapshot_vm(vm, meta);

@@ -2951,8 +2968,8 @@ vm_restore_time(struct vm *vm)
 	for (i = 0; i < maxcpus; i++) {
 		vcpu = &vm->vcpu[i];
 
-		error = vmmops_restore_tsc(vm->cookie, i, vcpu->tsc_offset -
-		    now);
+		error = vmmops_restore_tsc(vm->cookie, vcpu->cookie,
+		    vcpu->tsc_offset - now);
 		if (error)
 			return (error);
 	}
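The backend halves of the new vcpu_init/vcpu_cleanup hooks live in the two
suppressed diffs above (the SVM and VMX vmm implementations). As a rough
illustration of the shape such an implementation takes (a hypothetical
sketch, not the committed code; the malloc type M_EXAMPLE and its flags
are assumptions):

	static void *
	example_vcpu_init(void *arg __unused, int vcpuid)
	{
		struct svm_vcpu *vcpu;

		/* Allocate backend per-vCPU state instead of indexing a
		 * static per-VM array (M_EXAMPLE is an assumed malloc type). */
		vcpu = malloc(sizeof(*vcpu), M_EXAMPLE, M_WAITOK | M_ZERO);

		/* Keep the index for KTR traces and for calls back into the
		 * CPU-independent layer (the new vcpuid fields above). */
		vcpu->vcpuid = vcpuid;

		/* This pointer becomes vcpu->cookie in struct vcpu. */
		return (vcpu);
	}

	static void
	example_vcpu_cleanup(void *arg __unused, void *vcpui)
	{
		struct svm_vcpu *vcpu = vcpui;

		free(vcpu, M_EXAMPLE);
	}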