
Add support for running as a nested hypervisor under VMware Fusion, on
systems with VT-x/EPT (e.g. Sandy Bridge MacBooks). This will most
likely work on VMware Workstation 8/Player 4 as well. See the VMware
app note at:

  http://communities.vmware.com/docs/DOC-8970

Fusion doesn't propagate the PAT MSR auto save/restore entry/exit
control bits. Deal with this by detecting that case and setting up the
PAT MSR to be essentially a no-op: it is initialized to the power-on
default, and a software shadow copy is maintained.

Since it is treated as a no-op, guest OS settings are essentially
ignored. This may not give correct results, but since the hypervisor is
running nested, a number of bets are already off.
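
As an illustration only (not part of the commit), here is a standalone
user-space sketch of that fallback: probe_exit_ctls() is a hypothetical
stand-in for vmx_set_ctlreg(), and the PAT_* / PAT_VALUE() macros mirror
<machine/specialreg.h>. The shadow value is the power-on default that
the vmm_msr.c hunk below programs.

    #include <stdint.h>
    #include <stdio.h>

    #define PAT_UNCACHEABLE    0x00
    #define PAT_WRITE_THROUGH  0x04
    #define PAT_WRITE_BACK     0x06
    #define PAT_UNCACHED       0x07
    #define PAT_VALUE(i, m)    ((uint64_t)(m) << (8 * (i)))

    static int vmx_no_patmsr;   /* set when the PAT exit/entry bits are absent */
    static uint64_t pat_shadow; /* software copy of the guest's PAT */

    /* Hypothetical stand-in for vmx_set_ctlreg(); pretend the
     * PAT save/restore bits are refused, as under Fusion. */
    static int
    probe_exit_ctls(int want_pat)
    {
            return (want_pat ? -1 : 0);
    }

    int
    main(void)
    {
            if (probe_exit_ctls(1) != 0) {
                    /* Retry without the PAT bits, then shadow the MSR. */
                    if (probe_exit_ctls(0) != 0)
                            return (1);
                    vmx_no_patmsr = 1;
                    /* Power-on default PAT, as in the vmm_msr.c hunk below. */
                    pat_shadow = PAT_VALUE(0, PAT_WRITE_BACK) |
                        PAT_VALUE(1, PAT_WRITE_THROUGH) |
                        PAT_VALUE(2, PAT_UNCACHED) |
                        PAT_VALUE(3, PAT_UNCACHEABLE) |
                        PAT_VALUE(4, PAT_WRITE_BACK) |
                        PAT_VALUE(5, PAT_WRITE_THROUGH) |
                        PAT_VALUE(6, PAT_UNCACHED) |
                        PAT_VALUE(7, PAT_UNCACHEABLE);
            }
            printf("vmx_no_patmsr=%d pat=0x%016jx\n", vmx_no_patmsr,
                (uintmax_t)pat_shadow);
            return (0);
    }

On hardware that does offer the PAT bits, the first probe succeeds and
the shadow is never consulted.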

On a quad-core, HT-enabled 'MacBook8,2', nested VMs with 1/2/4 vCPUs
were fired up. The more nested vCPUs, the worse the performance, unless
the VMs were started in multiplexed mode, where things worked perfectly
up to the limit of 8 vCPUs.

Reviewed by:	neel
Peter Grehan 2011-12-24 19:39:02 +00:00
parent 3ee1a36e2e
commit 608f97c359
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bhyve/; revision=228870
3 changed files with 92 additions and 18 deletions

View File

@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/md_var.h>
 #include <machine/pmap.h>
 #include <machine/segments.h>
+#include <machine/specialreg.h>
 #include <machine/vmparam.h>
 #include <machine/vmm.h>
@@ -85,17 +86,22 @@ __FBSDID("$FreeBSD$");
 #define	PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
 #define	PROCBASED_CTLS2_ZERO_SETTING	0
 
-#define	VM_EXIT_CTLS_ONE_SETTING				\
+#define	VM_EXIT_CTLS_ONE_SETTING_NO_PAT				\
 	(VM_EXIT_HOST_LMA			|		\
 	VM_EXIT_SAVE_EFER			|		\
-	VM_EXIT_SAVE_PAT			|		\
-	VM_EXIT_LOAD_PAT			|		\
 	VM_EXIT_LOAD_EFER)
+
+#define	VM_EXIT_CTLS_ONE_SETTING				\
+	(VM_EXIT_CTLS_ONE_SETTING_NO_PAT	|		\
+	VM_EXIT_SAVE_PAT			|		\
+	VM_EXIT_LOAD_PAT)
+
 #define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
 
+#define	VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER
+
 #define	VM_ENTRY_CTLS_ONE_SETTING				\
-	(VM_ENTRY_LOAD_PAT			|		\
-	VM_ENTRY_LOAD_EFER)
+	(VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	|		\
+	VM_ENTRY_LOAD_PAT)
 #define	VM_ENTRY_CTLS_ZERO_SETTING				\
 	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|		\
 	VM_ENTRY_INTO_SMM			|		\
@@ -122,6 +128,8 @@ static uint64_t cr4_ones_mask, cr4_zeros_mask;
 static volatile u_int nextvpid;
 
+static int vmx_no_patmsr;
+
 /*
  * Virtual NMI blocking conditions.
  *
@@ -476,16 +484,39 @@ vmx_init(void)
 			       VM_EXIT_CTLS_ZERO_SETTING,
 			       &exit_ctls);
 	if (error) {
-		printf("vmx_init: processor does not support desired "
-		       "exit controls\n");
-		return (error);
+		/* Try again without the PAT MSR bits */
+		error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
+				       MSR_VMX_TRUE_EXIT_CTLS,
+				       VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
+				       VM_EXIT_CTLS_ZERO_SETTING,
+				       &exit_ctls);
+		if (error) {
+			printf("vmx_init: processor does not support desired "
+			       "exit controls\n");
+			return (error);
+		} else {
+			if (bootverbose)
+				printf("vmm: PAT MSR access not supported\n");
+			guest_msr_valid(MSR_PAT);
+			vmx_no_patmsr = 1;
+		}
 	}
 
 	/* Check support for VM-entry controls */
-	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
-			       VM_ENTRY_CTLS_ONE_SETTING,
-			       VM_ENTRY_CTLS_ZERO_SETTING,
-			       &entry_ctls);
+	if (!vmx_no_patmsr) {
+		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
+				       MSR_VMX_TRUE_ENTRY_CTLS,
+				       VM_ENTRY_CTLS_ONE_SETTING,
+				       VM_ENTRY_CTLS_ZERO_SETTING,
+				       &entry_ctls);
+	} else {
+		error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
+				       MSR_VMX_TRUE_ENTRY_CTLS,
+				       VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
+				       VM_ENTRY_CTLS_ZERO_SETTING,
+				       &entry_ctls);
+	}
+
 	if (error) {
 		printf("vmx_init: processor does not support desired "
 		       "entry controls\n");
@@ -646,18 +677,23 @@ vmx_vminit(struct vm *vm)
 	 * MSR_EFER is saved and restored in the guest VMCS area on a
 	 * VM exit and entry respectively. It is also restored from the
 	 * host VMCS area on a VM exit.
-	 *
-	 * MSR_PAT is saved and restored in the guest VMCS are on a VM exit
-	 * and entry respectively. It is also restored from the host VMCS
-	 * area on a VM exit.
 	 */
 	if (guest_msr_rw(vmx, MSR_GSBASE) ||
 	    guest_msr_rw(vmx, MSR_FSBASE) ||
 	    guest_msr_rw(vmx, MSR_KGSBASE) ||
-	    guest_msr_rw(vmx, MSR_EFER) ||
-	    guest_msr_rw(vmx, MSR_PAT))
+	    guest_msr_rw(vmx, MSR_EFER))
 		panic("vmx_vminit: error setting guest msr access");
 
+	/*
+	 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
+	 * and entry respectively. It is also restored from the host VMCS
+	 * area on a VM exit. However, if running on a system with no
+	 * MSR_PAT save/restore support, leave access disabled so accesses
+	 * will be trapped.
+	 */
+	if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
+		panic("vmx_vminit: error setting guest pat msr access");
+
 	for (i = 0; i < VM_MAXCPU; i++) {
 		vmx->vmcs[i].identifier = vmx_revision();
 		error = vmclear(&vmx->vmcs[i]);

View File

@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #define	VMM_MSR_F_EMULATE	0x01
 #define	VMM_MSR_F_READONLY	0x02
+#define	VMM_MSR_F_INVALID	0x04
 
 struct vmm_msr {
 	int		num;
@@ -54,6 +55,7 @@ static struct vmm_msr vmm_msr[] = {
 	{ MSR_CSTAR,	0 },
 	{ MSR_STAR,	0 },
 	{ MSR_SF_MASK,	0 },
+	{ MSR_PAT,	VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
 	{ MSR_APICBASE,	VMM_MSR_F_EMULATE },
 	{ MSR_BIOS_SIGN,VMM_MSR_F_EMULATE },
 	{ MSR_MCG_CAP,	VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
@@ -68,6 +70,9 @@ CTASSERT(VMM_MSR_NUM >= vmm_msr_num);
 #define	emulated_msr(idx)	\
 	((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)
 
+#define	invalid_msr(idx)	\
+	((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)
+
 void
 vmm_msr_init(void)
 {
@@ -108,6 +113,16 @@ guest_msrs_init(struct vm *vm, int cpu)
 			if (cpu == 0)
 				guest_msrs[i] |= APICBASE_BSP;
 			break;
+		case MSR_PAT:
+			guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
+			    PAT_VALUE(1, PAT_WRITE_THROUGH) |
+			    PAT_VALUE(2, PAT_UNCACHED) |
+			    PAT_VALUE(3, PAT_UNCACHEABLE) |
+			    PAT_VALUE(4, PAT_WRITE_BACK) |
+			    PAT_VALUE(5, PAT_WRITE_THROUGH) |
+			    PAT_VALUE(6, PAT_UNCACHED) |
+			    PAT_VALUE(7, PAT_UNCACHEABLE);
+			break;
 		default:
 			panic("guest_msrs_init: missing initialization for msr "
 			      "0x%0x", vmm_msr[i].num);
@@ -165,6 +180,9 @@ emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val)
 	if (idx < 0)
 		goto done;
 
+	if (invalid_msr(idx))
+		goto done;
+
 	if (!readonly_msr(idx)) {
 		guest_msrs = vm_guest_msrs(vm, cpu);
@@ -206,6 +224,9 @@ emulate_rdmsr(struct vm *vm, int cpu, u_int num)
 	if (idx < 0)
 		goto done;
 
+	if (invalid_msr(idx))
+		goto done;
+
 	guest_msrs = vm_guest_msrs(vm, cpu);
 	result = guest_msrs[idx];
@@ -263,3 +284,19 @@ restore_host_msrs(struct vm *vm, int cpu)
 			wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
 	}
 }
+
+/*
+ * Must be called by the CPU-specific code before any guests are
+ * created
+ */
+void
+guest_msr_valid(int msr)
+{
+	int i;
+
+	for (i = 0; i < vmm_msr_num; i++) {
+		if (vmm_msr[i].num == msr && invalid_msr(i)) {
+			vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
+		}
+	}
+}
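
Taken together, the vmm_msr.c hunks gate PAT handling on
VMM_MSR_F_INVALID: emulate_rdmsr()/emulate_wrmsr() refuse the access
while the flag is set, and vmx_init() calls guest_msr_valid(MSR_PAT)
only when the hardware lacks the PAT save/restore controls, at which
point the software shadow becomes the active path. A condensed,
self-contained sketch of that flag lifecycle (the table is trimmed to
the PAT entry, and MSR_PAT's number, 0x277, comes from the Intel SDM
rather than this diff):

    #define VMM_MSR_F_EMULATE   0x01
    #define VMM_MSR_F_READONLY  0x02
    #define VMM_MSR_F_INVALID   0x04
    #define MSR_PAT             0x277   /* IA32_PAT */

    struct vmm_msr {
            int     num;
            int     flags;
    };

    /* Trimmed table: MSR_PAT starts out emulated but invalid. */
    static struct vmm_msr vmm_msr[] = {
            { MSR_PAT, VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
    };
    static const int vmm_msr_num = 1;

    #define invalid_msr(idx)    \
            ((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)

    /* As in the diff above: flip the MSR from invalid to emulatable. */
    void
    guest_msr_valid(int msr)
    {
            int i;

            for (i = 0; i < vmm_msr_num; i++) {
                    if (vmm_msr[i].num == msr && invalid_msr(i))
                            vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
            }
    }

On hardware with working PAT save/restore, the entry simply stays
invalid and the guest's PAT accesses are never trapped.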

View File

@@ -36,6 +36,7 @@ void	vmm_msr_init(void);
 int	emulate_wrmsr(struct vm *vm, int vcpu, u_int msr, uint64_t val);
 int	emulate_rdmsr(struct vm *vm, int vcpu, u_int msr);
 void	guest_msrs_init(struct vm *vm, int cpu);
+void	guest_msr_valid(int msr);
 void	restore_host_msrs(struct vm *vm, int cpu);
 void	restore_guest_msrs(struct vm *vm, int cpu);