
Some Linux guests will implement a 'halt' by disabling the APIC and
executing the 'HLT' instruction. This condition was detected by
'vm_handle_hlt()' and converted into the SPINDOWN_CPU exitcode. The
bhyve(8) process would exit the vcpu thread in response to a
SPINDOWN_CPU and, when the last vcpu was spun down, it would reset the
virtual machine via vm_suspend(VM_SUSPEND_RESET).
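
As an aside, the guest-side sequence that triggers this path looks
roughly like the sketch below. This is an illustrative fragment, not
code from this commit or from Linux; only the local APIC base
(0xFEE00000), the spurious-vector register offset (0xF0), and the
software-enable bit (bit 8) are architectural facts.

#include <stdint.h>

/*
 * Sketch of a guest 'halt': software-disable the local APIC by
 * clearing the software-enable bit in the spurious-interrupt vector
 * register, then execute HLT with interrupts off.  No fixed interrupt
 * can be delivered in this state, which is what vm_handle_hlt() used
 * to detect and convert into SPINDOWN_CPU.
 */
static void
guest_halt(void)
{
        volatile uint32_t *svr = (volatile uint32_t *)0xfee000f0UL;

        *svr &= ~(uint32_t)0x100;       /* clear APIC software-enable */
        for (;;)
                __asm__ __volatile__("cli; hlt");
}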

This functionality was broken in r263780 in a way that made it impossible
to kill the bhyve(8) process because it would loop forever in
vm_handle_suspend().
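
The shape of that hang is easy to see in miniature. The sketch below
uses hypothetical types and names, not the r263780 kernel code: a
suspend can only complete once every active vcpu has parked itself in
the suspended set, and a vcpu whose host thread already exited on
SPINDOWN_CPU never will.

#include <stdbool.h>

/* Hypothetical stand-in for the kernel's cpuset bookkeeping. */
struct vm_cpusets {
        unsigned long active_cpus;      /* vcpus that were started */
        unsigned long suspended_cpus;   /* vcpus parked for suspend */
};

/*
 * A suspend finishes only when every active vcpu is also in the
 * suspended set.  A spun-down vcpu's bit in 'suspended_cpus' is never
 * set because its thread is gone, so this predicate stays false and
 * the remaining vcpus wait forever.
 */
static bool
suspend_complete(const struct vm_cpusets *vm)
{
        return ((vm->active_cpus & ~vm->suspended_cpus) == 0);
}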

Unbreak this by removing the code to spin down vcpus. A Linux guest
that 'halts' will thus appear to be hung, but this is consistent with
the behavior on bare metal. The guest can be rebooted by using the
bhyvectl options '--force-reset' or '--force-poweroff'.
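
For example, from the host (the VM name 'linuxguest' is a placeholder):

bhyvectl --vm=linuxguest --force-reset
bhyvectl --vm=linuxguest --force-poweroff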

Reviewed by:	grehan@
Author:	Neel Natu	2014-04-29 18:42:56 +00:00
Commit:	c6a0cc2e21
Parent:	a17937bdd0
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=265101
3 changed files with 32 additions and 87 deletions

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -325,7 +325,7 @@ enum vm_exitcode {
 	VM_EXITCODE_PAGING,
 	VM_EXITCODE_INST_EMUL,
 	VM_EXITCODE_SPINUP_AP,
-	VM_EXITCODE_SPINDOWN_CPU,
+	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
 	VM_EXITCODE_RENDEZVOUS,
 	VM_EXITCODE_IOAPIC_EOI,
 	VM_EXITCODE_SUSPENDED,

diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -191,8 +191,6 @@ static int vmm_ipinum;
 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
     "IPI vector used for vcpu notifications");
 
-static void vm_deactivate_cpu(struct vm *vm, int vcpuid);
-
 static void
 vcpu_cleanup(struct vm *vm, int i)
 {
@@ -1006,60 +1004,47 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid)
 static int
 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 {
-	struct vm_exit *vmexit;
 	struct vcpu *vcpu;
-	int t, timo, spindown;
+	const char *wmesg;
+	int t;
 
 	vcpu = &vm->vcpu[vcpuid];
-	spindown = 0;
 
 	vcpu_lock(vcpu);
 
+	while (1) {
+		/*
+		 * Do a final check for pending NMI or interrupts before
+		 * really putting this thread to sleep. Also check for
+		 * software events that would cause this vcpu to wakeup.
+		 *
+		 * These interrupts/events could have happened after the
+		 * vcpu returned from VMRUN() and before it acquired the
+		 * vcpu lock above.
+		 */
+		if (vm->rendezvous_func != NULL || vm->suspend)
+			break;
+		if (vm_nmi_pending(vm, vcpuid))
+			break;
+		if (!intr_disabled) {
+			if (vm_extint_pending(vm, vcpuid) ||
+			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
+				break;
+			}
+		}
+
+		if (vlapic_enabled(vcpu->vlapic))
+			wmesg = "vmidle";
+		else
+			wmesg = "vmhalt";
+
-	/*
-	 * Do a final check for pending NMI or interrupts before
-	 * really putting this thread to sleep.
-	 *
-	 * These interrupts could have happened any time after we
-	 * returned from VMRUN() and before we grabbed the vcpu lock.
-	 */
-	if (vm->rendezvous_func == NULL &&
-	    !vm_nmi_pending(vm, vcpuid) &&
-	    (intr_disabled || !vlapic_pending_intr(vcpu->vlapic, NULL))) {
 		t = ticks;
 		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
-		if (vlapic_enabled(vcpu->vlapic)) {
-			/*
-			 * XXX msleep_spin() is not interruptible so use the
-			 * 'timo' to put an upper bound on the sleep time.
-			 */
-			timo = hz;
-			msleep_spin(vcpu, &vcpu->mtx, "vmidle", timo);
-		} else {
-			/*
-			 * Spindown the vcpu if the APIC is disabled and it
-			 * had entered the halted state, but never spin
-			 * down the BSP.
-			 */
-			if (vcpuid != 0)
-				spindown = 1;
-		}
+		msleep_spin(vcpu, &vcpu->mtx, wmesg, 0);
 		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
 		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
 	}
 	vcpu_unlock(vcpu);
 
-	/*
-	 * Since 'vm_deactivate_cpu()' grabs a sleep mutex we must call it
-	 * outside the confines of the vcpu spinlock.
-	 */
-	if (spindown) {
-		*retu = true;
-		vmexit = vm_exitinfo(vm, vcpuid);
-		vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
-		vm_deactivate_cpu(vm, vcpuid);
-		VCPU_CTR0(vm, vcpuid, "spinning down cpu");
-	}
-
 	return (0);
 }
@@ -1673,30 +1658,6 @@ vm_activate_cpu(struct vm *vm, int vcpuid)
 	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
 }
 
-static void
-vm_deactivate_cpu(struct vm *vm, int vcpuid)
-{
-
-	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU,
-	    ("vm_deactivate_cpu: invalid vcpuid %d", vcpuid));
-	KASSERT(CPU_ISSET(vcpuid, &vm->active_cpus),
-	    ("vm_deactivate_cpu: vcpuid %d is not active", vcpuid));
-
-	VCPU_CTR0(vm, vcpuid, "deactivated");
-	CPU_CLR_ATOMIC(vcpuid, &vm->active_cpus);
-
-	/*
-	 * If a vcpu rendezvous is in progress then it could be blocked
-	 * on 'vcpuid' - unblock it before disappearing forever.
-	 */
-	mtx_lock(&vm->rendezvous_mtx);
-	if (vm->rendezvous_func != NULL) {
-		VCPU_CTR0(vm, vcpuid, "unblock rendezvous after deactivation");
-		wakeup(&vm->rendezvous_func);
-	}
-	mtx_unlock(&vm->rendezvous_mtx);
-}
-
 cpuset_t
 vm_active_cpus(struct vm *vm)
 {

diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -114,6 +114,7 @@ struct bhyvestats {
 	uint64_t	cpu_switch_rotate;
 	uint64_t	cpu_switch_direct;
 	int		io_reset;
+	int		io_poweroff;
 } stats;
 
 struct mt_vmm_info {
@@ -236,13 +237,6 @@ fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
 	return (CPU_EMPTY(&cpumask));
 }
 
-static int
-vmexit_catch_reset(void)
-{
-	stats.io_reset++;
-	return (VMEXIT_RESET);
-}
-
 static int
 vmexit_catch_inout(void)
 {
@@ -293,8 +287,10 @@ vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 	case INOUT_OK:
 		return (VMEXIT_CONTINUE);
 	case INOUT_RESET:
+		stats.io_reset++;
 		return (VMEXIT_RESET);
 	case INOUT_POWEROFF:
+		stats.io_poweroff++;
 		return (VMEXIT_POWEROFF);
 	default:
 		fprintf(stderr, "Unhandled %s%c 0x%04x\n",
@@ -364,17 +360,6 @@ vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 	return (retval);
 }
 
-static int
-vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
-{
-	int lastcpu;
-
-	lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
-	if (!lastcpu)
-		pthread_exit(NULL);
-	return (vmexit_catch_reset());
-}
-
 static int
 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
 {
@@ -501,7 +486,6 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
 	[VM_EXITCODE_MTRAP]  = vmexit_mtrap,
 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
 	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
-	[VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
 	[VM_EXITCODE_SUSPENDED] = vmexit_suspend
 };