Date:      Fri, 12 Sep 2014 06:15:20 +0000 (UTC)
From:      Neel Natu <neel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r271451 - in head/sys/amd64: include vmm vmm/intel
Message-ID:  <201409120615.s8C6FKdO016063@svn.freebsd.org>

Author: neel
Date: Fri Sep 12 06:15:20 2014
New Revision: 271451
URL: http://svnweb.freebsd.org/changeset/base/271451

Log:
  Optimize the common case of injecting an interrupt into a vcpu after a HLT
  by explicitly moving it out of the interrupt shadow. Once the hypervisor is
  done "executing" the HLT, the vcpu is by definition no longer in the
  1-instruction interrupt shadow.
  
  Prior to this change the interrupt would be held pending because the VMCS
  guest-interruptibility-state would indicate that "blocking by STI" was in
  effect. This resulted in an unnecessary round trip into the guest before
  the pending interrupt could be injected.
  
  Reviewed by:	grehan
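
  For context, the interrupt shadow referred to above comes from the usual
  guest idle idiom of enabling interrupts immediately before halting. A
  generic guest-side sketch (illustrative only; the cpu_idle_hlt() name is
  hypothetical and not taken from any particular guest) looks like this:

    /*
     * STI enables interrupts, but the cpu remains in a 1-instruction
     * interrupt shadow, so no interrupt can be delivered between the
     * STI and the HLT. The HLT then waits for the next interrupt with
     * RFLAGS.IF already set.
     */
    static void
    cpu_idle_hlt(void)
    {

            __asm __volatile("sti; hlt");
    }

  Once the hypervisor has emulated the HLT it has, by definition, finished
  the instruction covered by the shadow, so clearing the shadow at that point
  allows a pending interrupt to be injected on the very next VM entry.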

Modified:
  head/sys/amd64/include/vmm.h
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/vmm.c

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Fri Sep 12 05:25:56 2014	(r271450)
+++ head/sys/amd64/include/vmm.h	Fri Sep 12 06:15:20 2014	(r271451)
@@ -82,6 +82,7 @@ enum vm_reg_name {
 	VM_REG_GUEST_PDPTE1,
 	VM_REG_GUEST_PDPTE2,
 	VM_REG_GUEST_PDPTE3,
+	VM_REG_GUEST_INTR_SHADOW,
 	VM_REG_LAST
 };
 

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Fri Sep 12 05:25:56 2014	(r271450)
+++ head/sys/amd64/vmm/intel/vmx.c	Fri Sep 12 06:15:20 2014	(r271451)
@@ -2712,6 +2712,46 @@ vmxctx_setreg(struct vmxctx *vmxctx, int
 }
 
 static int
+vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+{
+	uint64_t gi;
+	int error;
+
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running, 
+	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
+	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
+	return (error);
+}
+
+static int
+vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+{
+	struct vmcs *vmcs;
+	uint64_t gi;
+	int error, ident;
+
+	/*
+	 * Forcing the vcpu into an interrupt shadow is not supported.
+	 */
+	if (val) {
+		error = EINVAL;
+		goto done;
+	}
+
+	vmcs = &vmx->vmcs[vcpu];
+	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
+	error = vmcs_getreg(vmcs, running, ident, &gi);
+	if (error == 0) {
+		gi &= ~HWINTR_BLOCKING;
+		error = vmcs_setreg(vmcs, running, ident, gi);
+	}
+done:
+	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+	    error ? "failed" : "succeeded");
+	return (error);
+}
+
+static int
 vmx_shadow_reg(int reg)
 {
 	int shreg;
@@ -2742,6 +2782,9 @@ vmx_getreg(void *arg, int vcpu, int reg,
 	if (running && hostcpu != curcpu)
 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+
 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
 		return (0);
 
@@ -2760,6 +2803,9 @@ vmx_setreg(void *arg, int vcpu, int reg,
 	if (running && hostcpu != curcpu)
 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Fri Sep 12 05:25:56 2014	(r271450)
+++ head/sys/amd64/vmm/vmm.c	Fri Sep 12 06:15:20 2014	(r271451)
@@ -1090,7 +1090,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid,
 {
 	struct vcpu *vcpu;
 	const char *wmesg;
-	int t, vcpu_halted, vm_halted;
+	int error, t, vcpu_halted, vm_halted;
 
 	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
 
@@ -1098,6 +1098,22 @@ vm_handle_hlt(struct vm *vm, int vcpuid,
 	vcpu_halted = 0;
 	vm_halted = 0;
 
+	/*
+	 * The typical way to halt a cpu is to execute: "sti; hlt"
+	 *
+	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
+	 * remains in an "interrupt shadow" for an additional instruction
+	 * following the STI. This guarantees that the "sti; hlt" sequence is
+	 * atomic and a pending interrupt will be recognized after the HLT.
+	 *
+	 * After the HLT emulation is done the vcpu is no longer in an
+	 * interrupt shadow and a pending interrupt can be injected on
+	 * the next entry into the guest.
+	 */
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+	    __func__, error));
+
 	vcpu_lock(vcpu);
 	while (1) {
 		/*
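
Since VM_REG_GUEST_INTR_SHADOW is now just another vm_reg_name, the shadow
state can also be queried from userland through the normal register
interface. Below is a minimal sketch, assuming the contemporaneous libvmmapi
interface (vm_open()/vm_get_register()) and amd64 headers; error handling is
kept short:

    #include <sys/types.h>
    #include <machine/vmm.h>

    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <vmmapi.h>

    int
    main(int argc, char *argv[])
    {
            struct vmctx *ctx;
            uint64_t shadow;
            int error, vcpu = 0;

            if (argc < 2)
                    errx(1, "usage: %s <vmname>", argv[0]);

            ctx = vm_open(argv[1]);
            if (ctx == NULL)
                    err(1, "vm_open: %s", argv[1]);

            /* The VMX backend reports 1 if the vcpu is in an interrupt shadow. */
            error = vm_get_register(ctx, vcpu, VM_REG_GUEST_INTR_SHADOW,
                &shadow);
            if (error != 0)
                    err(1, "vm_get_register");

            printf("vcpu %d interrupt shadow: %ju\n", vcpu, (uintmax_t)shadow);
            return (0);
    }

Note that writing any value other than 0 to this register fails with EINVAL
(forcing a vcpu into an interrupt shadow is not supported), so the only
meaningful write is the one vm_handle_hlt() now performs: clearing the
shadow after the HLT has been emulated.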


