Date:      Tue, 30 Dec 2014 08:24:15 +0000 (UTC)
From:      Neel Natu <neel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r276403 - in stable/10: sys/amd64/include sys/amd64/vmm sys/amd64/vmm/amd sys/amd64/vmm/intel sys/amd64/vmm/io sys/modules/vmm usr.sbin/bhyve usr.sbin/bhyvectl
Message-ID:  <201412300824.sBU8OF3D073229@svn.freebsd.org>

Author: neel
Date: Tue Dec 30 08:24:14 2014
New Revision: 276403
URL: https://svnweb.freebsd.org/changeset/base/276403

Log:
  MFC r273375
  Add support for AMD processors with the SVM/AMD-V hardware extensions.
  
  MFC r273749
  Remove bhyve SVM feature printf's now that they are available in the general
  CPU feature detection code.
  
  MFC r273766
  Add missing 'break' pointed out by Coverity CID 1249760.
  
  MFC r276098
  Allow ktr(4) tracing of all guest exceptions via the tunable "hw.vmm.trace_guest_exceptions"
  
  MFC r276392
  Inject #UD into the guest when it executes either 'MONITOR' or 'MWAIT' on an
  AMD/SVM host.
  
  MFC r276402
  Remove "svn:mergeinfo" property that was dragged along when these files were
  svn copied in r273375.
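
Note on MFC r276098: the vmm.c hunk that implements the new
vcpu_trace_exceptions() hook falls outside the diff shown below, but a
minimal sketch of a tunable-backed predicate along those lines, assuming a
single global fetched at module load (the helper name vmm_trace_init is
illustrative), would be:

	static int trace_guest_exceptions;

	static void
	vmm_trace_init(void)
	{
		/* Tunable name taken from the log entry above. */
		TUNABLE_INT_FETCH("hw.vmm.trace_guest_exceptions",
		    &trace_guest_exceptions);
	}

	int
	vcpu_trace_exceptions(struct vm *vm, int vcpuid)
	{

		return (trace_guest_exceptions);
	}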

Added:
  stable/10/sys/amd64/vmm/amd/npt.c
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/npt.c
  stable/10/sys/amd64/vmm/amd/npt.h
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/npt.h
  stable/10/sys/amd64/vmm/amd/svm.c   (contents, props changed)
     - copied, changed from r273375, head/sys/amd64/vmm/amd/svm.c
  stable/10/sys/amd64/vmm/amd/svm.h
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm.h
  stable/10/sys/amd64/vmm/amd/svm_genassym.c
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm_genassym.c
  stable/10/sys/amd64/vmm/amd/svm_msr.c
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm_msr.c
  stable/10/sys/amd64/vmm/amd/svm_msr.h
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm_msr.h
  stable/10/sys/amd64/vmm/amd/svm_softc.h
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm_softc.h
  stable/10/sys/amd64/vmm/amd/svm_support.S
     - copied unchanged from r273375, head/sys/amd64/vmm/amd/svm_support.S
  stable/10/sys/amd64/vmm/amd/vmcb.c   (contents, props changed)
     - copied, changed from r273375, head/sys/amd64/vmm/amd/vmcb.c
  stable/10/sys/amd64/vmm/amd/vmcb.h   (contents, props changed)
     - copied, changed from r273375, head/sys/amd64/vmm/amd/vmcb.h
Directory Properties:
  stable/10/sys/amd64/vmm/amd/npt.c   (props changed)
  stable/10/sys/amd64/vmm/amd/npt.h   (props changed)
  stable/10/sys/amd64/vmm/amd/svm.h   (props changed)
  stable/10/sys/amd64/vmm/amd/svm_genassym.c   (props changed)
  stable/10/sys/amd64/vmm/amd/svm_msr.c   (props changed)
  stable/10/sys/amd64/vmm/amd/svm_msr.h   (props changed)
  stable/10/sys/amd64/vmm/amd/svm_softc.h   (props changed)
  stable/10/sys/amd64/vmm/amd/svm_support.S   (props changed)
Modified:
  stable/10/sys/amd64/include/vmm.h
  stable/10/sys/amd64/include/vmm_instruction_emul.h
  stable/10/sys/amd64/vmm/amd/amdv.c
  stable/10/sys/amd64/vmm/intel/vmcs.c
  stable/10/sys/amd64/vmm/intel/vmcs.h
  stable/10/sys/amd64/vmm/intel/vmx.c
  stable/10/sys/amd64/vmm/io/vlapic.c
  stable/10/sys/amd64/vmm/vmm.c
  stable/10/sys/amd64/vmm/vmm_instruction_emul.c
  stable/10/sys/amd64/vmm/x86.c
  stable/10/sys/modules/vmm/Makefile
  stable/10/usr.sbin/bhyve/bhyverun.c
  stable/10/usr.sbin/bhyve/xmsr.c
  stable/10/usr.sbin/bhyvectl/bhyvectl.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/amd64/include/vmm.h
==============================================================================
--- stable/10/sys/amd64/include/vmm.h	Tue Dec 30 07:08:30 2014	(r276402)
+++ stable/10/sys/amd64/include/vmm.h	Tue Dec 30 08:24:14 2014	(r276403)
@@ -357,6 +357,8 @@ void vm_copyin(struct vm *vm, int vcpuid
     void *kaddr, size_t len);
 void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
     struct vm_copyinfo *copyinfo, size_t len);
+
+int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
 #endif	/* KERNEL */
 
 #define	VM_MAXCPU	16			/* maximum virtual cpus */
@@ -487,6 +489,7 @@ enum vm_exitcode {
 	VM_EXITCODE_TASK_SWITCH,
 	VM_EXITCODE_MONITOR,
 	VM_EXITCODE_MWAIT,
+	VM_EXITCODE_SVM,
 	VM_EXITCODE_MAX
 };
 
@@ -564,6 +567,14 @@ struct vm_exit {
 			int		inst_type;
 			int		inst_error;
 		} vmx;
+		/*
+		 * SVM specific payload.
+		 */
+		struct {
+			uint64_t	exitcode;
+			uint64_t	exitinfo1;
+			uint64_t	exitinfo2;
+		} svm;
 		struct {
 			uint32_t	code;		/* ecx value */
 			uint64_t	wval;

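The new VM_EXITCODE_SVM value and 'svm' payload above give userspace a way
to see otherwise-unhandled SVM exits. The matching bhyverun.c change is not
part of the output shown in this mail; a sketch of how such an exit could be
reported there (the handler name and the VMEXIT_ABORT disposition are
assumptions) is:

	static int
	vmexit_svm(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
	{

		/* Dump the raw VMCB exit information added to struct vm_exit. */
		fprintf(stderr, "unhandled SVM exit: exitcode %#lx, "
		    "exitinfo1 %#lx, exitinfo2 %#lx\n", vme->u.svm.exitcode,
		    vme->u.svm.exitinfo1, vme->u.svm.exitinfo2);
		return (VMEXIT_ABORT);
	}
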
Modified: stable/10/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- stable/10/sys/amd64/include/vmm_instruction_emul.h	Tue Dec 30 07:08:30 2014	(r276402)
+++ stable/10/sys/amd64/include/vmm_instruction_emul.h	Tue Dec 30 08:24:14 2014	(r276403)
@@ -93,7 +93,7 @@ int vmm_fetch_instruction(struct vm *vm,
 int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
     uint64_t gla, int prot, uint64_t *gpa);
 
-void vie_init(struct vie *vie);
+void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
 
 /*
  * Decode the instruction fetched into 'vie' so it can be emulated.

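The vie_init() change above lets a caller hand in instruction bytes that the
hardware has already captured (SVM decode assist) instead of always fetching
them from guest memory. The two calling patterns implied by the new
signature, assuming locals named 'vie', 'inst_bytes' and 'inst_length' at
the call site:

	/* No pre-captured bytes: the emulator fetches from the guest. */
	vie_init(&vie, NULL, 0);

	/* Bytes already supplied by hardware decode assist. */
	vie_init(&vie, inst_bytes, inst_length);
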
Modified: stable/10/sys/amd64/vmm/amd/amdv.c
==============================================================================
--- stable/10/sys/amd64/vmm/amd/amdv.c	Tue Dec 30 07:08:30 2014	(r276402)
+++ stable/10/sys/amd64/vmm/amd/amdv.c	Tue Dec 30 08:24:14 2014	(r276403)
@@ -38,149 +38,6 @@ __FBSDID("$FreeBSD$");
 #include "io/iommu.h"
 
 static int
-amdv_init(int ipinum)
-{
-
-	printf("amdv_init: not implemented\n");
-	return (ENXIO);
-}
-
-static int
-amdv_cleanup(void)
-{
-
-	printf("amdv_cleanup: not implemented\n");
-	return (ENXIO);
-}
-
-static void
-amdv_resume(void)
-{
-}
-
-static void *
-amdv_vminit(struct vm *vm, struct pmap *pmap)
-{
-
-	printf("amdv_vminit: not implemented\n");
-	return (NULL);
-}
-
-static int
-amdv_vmrun(void *arg, int vcpu, register_t rip, struct pmap *pmap,
-    void *rptr, void *sptr)
-{
-
-	printf("amdv_vmrun: not implemented\n");
-	return (ENXIO);
-}
-
-static void
-amdv_vmcleanup(void *arg)
-{
-
-	printf("amdv_vmcleanup: not implemented\n");
-	return;
-}
-
-static int
-amdv_getreg(void *arg, int vcpu, int regnum, uint64_t *retval)
-{
-	
-	printf("amdv_getreg: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setreg(void *arg, int vcpu, int regnum, uint64_t val)
-{
-	
-	printf("amdv_setreg: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_getdesc(void *vmi, int vcpu, int num, struct seg_desc *desc)
-{
-
-	printf("amdv_get_desc: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setdesc(void *vmi, int vcpu, int num, struct seg_desc *desc)
-{
-
-	printf("amdv_get_desc: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_getcap(void *arg, int vcpu, int type, int *retval)
-{
-
-	printf("amdv_getcap: not implemented\n");
-	return (EINVAL);
-}
-
-static int
-amdv_setcap(void *arg, int vcpu, int type, int val)
-{
-
-	printf("amdv_setcap: not implemented\n");
-	return (EINVAL);
-}
-
-static struct vmspace *
-amdv_vmspace_alloc(vm_offset_t min, vm_offset_t max)
-{
-
-	printf("amdv_vmspace_alloc: not implemented\n");
-	return (NULL);
-}
-
-static void
-amdv_vmspace_free(struct vmspace *vmspace)
-{
-
-	printf("amdv_vmspace_free: not implemented\n");
-	return;
-}
-
-static struct vlapic *
-amdv_vlapic_init(void *arg, int vcpuid)
-{
-
-	panic("amdv_vlapic_init: not implmented");
-}
-
-static void
-amdv_vlapic_cleanup(void *arg, struct vlapic *vlapic)
-{
-
-	panic("amdv_vlapic_cleanup: not implemented");
-}
-
-struct vmm_ops vmm_ops_amd = {
-	amdv_init,
-	amdv_cleanup,
-	amdv_resume,
-	amdv_vminit,
-	amdv_vmrun,
-	amdv_vmcleanup,
-	amdv_getreg,
-	amdv_setreg,
-	amdv_getdesc,
-	amdv_setdesc,
-	amdv_getcap,
-	amdv_setcap,
-	amdv_vmspace_alloc,
-	amdv_vmspace_free,
-	amdv_vlapic_init,
-	amdv_vlapic_cleanup,
-};
-
-static int
 amd_iommu_init(void)
 {
 

Copied: stable/10/sys/amd64/vmm/amd/npt.c (from r273375, head/sys/amd64/vmm/amd/npt.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/npt.c	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/npt.c)
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+
+#include <machine/pmap.h>
+
+#include "npt.h"
+
+SYSCTL_DECL(_hw_vmm);
+SYSCTL_NODE(_hw_vmm, OID_AUTO, npt, CTLFLAG_RW, NULL, NULL);
+
+static int npt_flags;
+SYSCTL_INT(_hw_vmm_npt, OID_AUTO, pmap_flags, CTLFLAG_RD,
+	&npt_flags, 0, NULL);
+
+#define NPT_IPIMASK	0xFF
+
+/*
+ * AMD nested page table init.
+ */
+int
+svm_npt_init(int ipinum)
+{
+	int enable_superpage = 1;
+
+	npt_flags = ipinum & NPT_IPIMASK;
+	TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage);
+	if (enable_superpage)
+		npt_flags |= PMAP_PDE_SUPERPAGE; 
+	
+	return (0);
+}
+
+static int
+npt_pinit(pmap_t pmap)
+{
+
+	return (pmap_pinit_type(pmap, PT_RVI, npt_flags));
+}
+
+struct vmspace *
+svm_npt_alloc(vm_offset_t min, vm_offset_t max)
+{
+	
+	return (vmspace_alloc(min, max, npt_pinit));
+}
+
+void
+svm_npt_free(struct vmspace *vmspace)
+{
+
+	vmspace_free(vmspace);
+}

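The three helpers above are the AMD nested-paging counterparts of the Intel
EPT routines: svm_npt_init() folds the IPI vector and superpage tunable into
the pmap flags, and the alloc/free pair hands out vmspaces whose pmaps use
the PT_RVI type. A sketch of their use by the generic layer (the actual
wiring into the vmm_ops table is in svm.c, outside the hunks shown;
VM_MAXUSER_ADDRESS as the upper bound is an assumption here):

	struct vmspace *vms;

	/* Create the nested page tables for a new VM ... */
	vms = svm_npt_alloc(0, VM_MAXUSER_ADDRESS);

	/* ... and tear them down when the VM is destroyed. */
	svm_npt_free(vms);
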
Copied: stable/10/sys/amd64/vmm/amd/npt.h (from r273375, head/sys/amd64/vmm/amd/npt.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/npt.h	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/npt.h)
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_NPT_H_
+#define _SVM_NPT_H_
+
+int 	svm_npt_init(int ipinum);
+struct	vmspace *svm_npt_alloc(vm_offset_t min, vm_offset_t max);
+void	svm_npt_free(struct vmspace *vmspace);
+
+#endif /* _SVM_NPT_H_ */

Copied and modified: stable/10/sys/amd64/vmm/amd/svm.c (from r273375, head/sys/amd64/vmm/amd/svm.c)
==============================================================================
--- head/sys/amd64/vmm/amd/svm.c	Tue Oct 21 07:10:43 2014	(r273375, copy source)
+++ stable/10/sys/amd64/vmm/amd/svm.c	Tue Dec 30 08:24:14 2014	(r276403)
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/specialreg.h>
 #include <machine/smp.h>
 #include <machine/vmm.h>
+#include <machine/vmm_dev.h>
 #include <machine/vmm_instruction_emul.h>
 
 #include "vmm_lapic.h"
@@ -174,30 +175,9 @@ check_svm_features(void)
 	do_cpuid(0x8000000A, regs);
 	svm_feature = regs[3];
 
-	printf("SVM: Revision %d\n", regs[0] & 0xFF);
-	printf("SVM: NumASID %u\n", regs[1]);
-
 	nasid = regs[1];
 	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));
 
-	printf("SVM: Features 0x%b\n", svm_feature,
-		"\020"
-		"\001NP"		/* Nested paging */
-		"\002LbrVirt"		/* LBR virtualization */
-		"\003SVML"		/* SVM lock */
-		"\004NRIPS"		/* NRIP save */
-		"\005TscRateMsr"	/* MSR based TSC rate control */
-		"\006VmcbClean"		/* VMCB clean bits */
-		"\007FlushByAsid"	/* Flush by ASID */
-		"\010DecodeAssist"	/* Decode assist */
-		"\011<b8>"
-		"\012<b9>"
-		"\013PauseFilter"	
-		"\014<b11>"
-		"\015PauseFilterThreshold"	
-		"\016AVIC"	
-		);
-
 	/* bhyve requires the Nested Paging feature */
 	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
 		printf("SVM: Nested Paging feature not available.\n");
@@ -450,8 +430,24 @@ vmcb_init(struct svm_softc *sc, int vcpu
 			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
 	}
 
-	/* Intercept Machine Check exceptions. */
-	svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
+
+	/*
+	 * Intercept everything when tracing guest exceptions otherwise
+	 * just intercept machine check exception.
+	 */
+	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
+		for (n = 0; n < 32; n++) {
+			/*
+			 * Skip unimplemented vectors in the exception bitmap.
+			 */
+			if (n == 2 || n == 9) {
+				continue;
+			}
+			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
+		}
+	} else {
+		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
+	}
 
 	/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
@@ -465,6 +461,9 @@ vmcb_init(struct svm_softc *sc, int vcpu
 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
 	    VMCB_INTCPT_FERR_FREEZE);
 
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
+	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
+
 	/*
 	 * From section "Canonicalization and Consistency Checks" in APMv2
 	 * the VMRUN intercept bit must be set to pass the consistency check.
@@ -1144,6 +1143,10 @@ exit_reason_to_str(uint64_t reason)
 		return ("msr");
 	case VMCB_EXIT_IRET:
 		return ("iret");
+	case VMCB_EXIT_MONITOR:
+		return ("monitor");
+	case VMCB_EXIT_MWAIT:
+		return ("mwait");
 	default:
 		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
 		return (reasonbuf);
@@ -1197,9 +1200,10 @@ svm_vmexit(struct svm_softc *svm_sc, int
 	struct vmcb_state *state;
 	struct vmcb_ctrl *ctrl;
 	struct svm_regctx *ctx;
+	struct vm_exception exception;
 	uint64_t code, info1, info2, val;
 	uint32_t eax, ecx, edx;
-	int handled;
+	int error, errcode_valid, handled, idtvec, reflect;
 	bool retu;
 
 	ctx = svm_get_guest_regctx(svm_sc, vcpu);
@@ -1258,8 +1262,78 @@ svm_vmexit(struct svm_softc *svm_sc, int
 	case VMCB_EXIT_NMI:	/* external NMI */
 		handled = 1;
 		break;
-	case VMCB_EXIT_MC:	/* machine check */
+	case 0x40 ... 0x5F:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
+		reflect = 1;
+		idtvec = code - 0x40;
+		switch (idtvec) {
+		case IDT_MC:
+			/*
+			 * Call the machine check handler by hand. Also don't
+			 * reflect the machine check back into the guest.
+			 */
+			reflect = 0;
+			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
+			__asm __volatile("int $18");
+			break;
+		case IDT_PF:
+			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
+			    info2);
+			KASSERT(error == 0, ("%s: error %d updating cr2",
+			    __func__, error));
+			/* fallthru */
+		case IDT_NP:
+		case IDT_SS:
+		case IDT_GP:
+		case IDT_AC:
+		case IDT_TS:
+			errcode_valid = 1;
+			break;
+
+		case IDT_DF:
+			errcode_valid = 1;
+			info1 = 0;
+			break;
+
+		case IDT_BP:
+		case IDT_OF:
+		case IDT_BR:
+			/*
+			 * The 'nrip' field is populated for INT3, INTO and
+			 * BOUND exceptions and this also implies that
+			 * 'inst_length' is non-zero.
+			 *
+			 * Reset 'inst_length' to zero so the guest %rip at
+			 * event injection is identical to what it was when
+			 * the exception originally happened.
+			 */
+			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
+			    "to zero before injecting exception %d",
+			    vmexit->inst_length, idtvec);
+			vmexit->inst_length = 0;
+			/* fallthru */
+		default:
+			errcode_valid = 0;
+			break;
+		}
+		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
+		    "when reflecting exception %d into guest",
+		    vmexit->inst_length, idtvec));
+
+		if (reflect) {
+			/* Reflect the exception back into the guest */
+			exception.vector = idtvec;
+			exception.error_code_valid = errcode_valid;
+			exception.error_code = errcode_valid ? info1 : 0;
+			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
+			    "%d/%#x into the guest", exception.vector,
+			    exception.error_code);
+			error = vm_inject_exception(svm_sc->vm, vcpu,
+			    &exception);
+			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
+			    __func__, error));
+		}
+		handled = 1;
 		break;
 	case VMCB_EXIT_MSR:	/* MSR access. */
 		eax = state->rax;
@@ -1339,6 +1413,12 @@ svm_vmexit(struct svm_softc *svm_sc, int
 			    info2, info1, state->rip);
 		}
 		break;
+	case VMCB_EXIT_MONITOR:
+		vmexit->exitcode = VM_EXITCODE_MONITOR;
+		break;
+	case VMCB_EXIT_MWAIT:
+		vmexit->exitcode = VM_EXITCODE_MWAIT;
+		break;
 	default:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
 		break;

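A concrete walk-through of the new exception-reflection path, using only
values visible in the hunk above:

	/*
	 * Example: the guest takes a general-protection fault.
	 *
	 *   VMCB exit code = 0x40 + 13 = 0x4D      (in the 0x40 ... 0x5F range)
	 *   idtvec         = code - 0x40 = IDT_GP
	 *   errcode_valid  = 1, error_code = info1 (hardware-supplied)
	 *
	 * The fault is re-injected unchanged via vm_inject_exception(), whereas
	 * a machine check (IDT_MC) is vectored to the host handler and is not
	 * reflected back into the guest.
	 */
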
Copied: stable/10/sys/amd64/vmm/amd/svm.h (from r273375, head/sys/amd64/vmm/amd/svm.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm.h	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm.h)
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_H_
+#define _SVM_H_
+
+/*
+ * Guest register state that is saved outside the VMCB.
+ */
+struct svm_regctx {
+	register_t	sctx_rbp;
+	register_t	sctx_rbx;
+	register_t	sctx_rcx;
+	register_t	sctx_rdx;
+	register_t	sctx_rdi;
+	register_t	sctx_rsi;
+	register_t	sctx_r8;
+	register_t	sctx_r9;
+	register_t	sctx_r10;
+	register_t	sctx_r11;
+	register_t	sctx_r12;
+	register_t	sctx_r13;
+	register_t	sctx_r14;
+	register_t	sctx_r15;
+};
+
+void svm_launch(uint64_t pa, struct svm_regctx *);
+
+#endif /* _SVM_H_ */

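svm_launch() is the hand-written entry point in svm_support.S (added below):
it loads the guest registers from the svm_regctx, issues VMRUN on the VMCB
whose physical address is passed in the first argument, and saves the guest
registers back after #VMEXIT. A sketch of its call site, assuming the
per-vcpu state and guest context pointers used in svm.c:

	/* Illustrative; the real call is made from svm_vmrun(). */
	svm_launch(vcpustate->vmcb_pa, gctx);
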
Copied: stable/10/sys/amd64/vmm/amd/svm_genassym.c (from r273375, head/sys/amd64/vmm/amd/svm_genassym.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm_genassym.c	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm_genassym.c)
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/assym.h>
+
+#include "svm.h"
+
+ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
+ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
+ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
+ASSYM(SCTX_RDX, offsetof(struct svm_regctx, sctx_rdx));
+ASSYM(SCTX_RDI, offsetof(struct svm_regctx, sctx_rdi));
+ASSYM(SCTX_RSI, offsetof(struct svm_regctx, sctx_rsi));
+ASSYM(SCTX_R8,  offsetof(struct svm_regctx, sctx_r8));
+ASSYM(SCTX_R9,  offsetof(struct svm_regctx, sctx_r9));
+ASSYM(SCTX_R10, offsetof(struct svm_regctx, sctx_r10));
+ASSYM(SCTX_R11, offsetof(struct svm_regctx, sctx_r11));
+ASSYM(SCTX_R12, offsetof(struct svm_regctx, sctx_r12));
+ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
+ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
+ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));

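The ASSYM() entries above are turned by the kernel build's genassym
machinery into the svm_assym.h header that svm_support.S includes, so each
structure offset becomes a plain assembler-visible constant. The generated
header looks roughly like this (offsets assume 8-byte register_t fields in
declaration order and are illustrative only):

	/* svm_assym.h (generated at build time) */
	#define	SCTX_RBP	0x00
	#define	SCTX_RBX	0x08
	#define	SCTX_RCX	0x10
	#define	SCTX_R8		0x30
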
Copied: stable/10/sys/amd64/vmm/amd/svm_msr.c (from r273375, head/sys/amd64/vmm/amd/svm_msr.c)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm_msr.c	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm_msr.c)
@@ -0,0 +1,136 @@
+/*-
+ * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/errno.h>
+
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+
+#include "svm_msr.h"
+
+#ifndef MSR_AMDK8_IPM
+#define	MSR_AMDK8_IPM	0xc0010055
+#endif
+
+enum {
+	IDX_MSR_LSTAR,
+	IDX_MSR_CSTAR,
+	IDX_MSR_STAR,
+	IDX_MSR_SF_MASK,
+	HOST_MSR_NUM		/* must be the last enumeration */
+};
+
+static uint64_t host_msrs[HOST_MSR_NUM];
+
+void
+svm_msr_init(void)
+{
+	/* 
+	 * It is safe to cache the values of the following MSRs because they
+	 * don't change based on curcpu, curproc or curthread.
+	 */
+	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+}
+
+void
+svm_msr_guest_init(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * All the MSRs accessible to the guest are either saved/restored by
+	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
+	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
+	 *
+	 * There are no guest MSRs that are saved/restored "by hand" so nothing
+	 * more to do here.
+	 */
+	return;
+}
+
+void
+svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * Save host MSRs (if any) and restore guest MSRs (if any).
+	 */
+}
+
+void
+svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
+{
+	/*
+	 * Save guest MSRs (if any) and restore host MSRs.
+	 */
+	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
+	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
+	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
+	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);
+
+	/* MSR_KGSBASE will be restored on the way back to userspace */
+}
+
+int
+svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+    bool *retu)
+{
+	int error = 0;
+
+	switch (num) {
+	case MSR_AMDK8_IPM:
+		*result = 0;
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}
+
+int
+svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
+{
+	int error = 0;
+
+	switch (num) {
+	case MSR_AMDK8_IPM:
+		/*
+		 * Ignore writes to the "Interrupt Pending Message" MSR.
+		 */
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	return (error);
+}

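The only MSR handled by hand here is the AMD "Interrupt Pending Message"
register: reads return zero and writes are silently dropped, while anything
unrecognized returns EINVAL so the access can be pushed up the stack (and,
for bhyve, ultimately to the userspace emulation in xmsr.c). A small
illustrative call:

	uint64_t val;
	bool retu = false;

	/* Guest rdmsr of MSR_AMDK8_IPM always yields 0. */
	if (svm_rdmsr(sc, vcpu, MSR_AMDK8_IPM, &val, &retu) == 0)
		KASSERT(val == 0, ("unexpected IPM value %#lx", val));
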
Copied: stable/10/sys/amd64/vmm/amd/svm_msr.h (from r273375, head/sys/amd64/vmm/amd/svm_msr.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm_msr.h	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm_msr.h)
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2014 Neel Natu (neel@freebsd.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_MSR_H_
+#define	_SVM_MSR_H_
+
+struct svm_softc;
+
+void svm_msr_init(void);
+void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
+
+int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
+    bool *retu);
+int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
+    bool *retu);
+
+#endif	/* _SVM_MSR_H_ */

Copied: stable/10/sys/amd64/vmm/amd/svm_softc.h (from r273375, head/sys/amd64/vmm/amd/svm_softc.h)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm_softc.h	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm_softc.h)
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SVM_SOFTC_H_
+#define _SVM_SOFTC_H_
+
+#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
+#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)
+
+struct asid {
+	uint64_t	gen;	/* range is [1, ~0UL] */
+	uint32_t	num;	/* range is [1, nasid - 1] */
+};
+
+/*
+ * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
+ * due to VMCB alignment requirements.
+ */
+struct svm_vcpu {
+	struct vmcb	vmcb;	 /* hardware saved vcpu context */
+	struct svm_regctx swctx; /* software saved vcpu context */
+	uint64_t	vmcb_pa; /* VMCB physical address */
+        int		lastcpu; /* host cpu that the vcpu last ran on */
+	uint32_t	dirty;	 /* state cache bits that must be cleared */
+	long		eptgen;	 /* pmap->pm_eptgen when the vcpu last ran */
+	struct asid	asid;
+} __aligned(PAGE_SIZE);
+
+/*
+ * SVM softc, one per virtual machine.
+ */
+struct svm_softc {
+	uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE];    /* shared by all vcpus */
+	uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE];    /* shared by all vcpus */
+	uint8_t apic_page[VM_MAXCPU][PAGE_SIZE];
+	struct svm_vcpu vcpu[VM_MAXCPU];
+	vm_offset_t 	nptp;			    /* nested page table */
+	struct vm	*vm;
+} __aligned(PAGE_SIZE);
+
+CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0);
+
+static __inline struct svm_vcpu *
+svm_get_vcpu(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu]));
+}
+
+static __inline struct vmcb *
+svm_get_vmcb(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb));
+}
+
+static __inline struct vmcb_state *
+svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.state));
+}
+
+static __inline struct vmcb_ctrl *
+svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].vmcb.ctrl));
+}
+
+static __inline struct svm_regctx *
+svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
+{
+
+	return (&(sc->vcpu[vcpu].swctx));
+}
+
+static __inline void
+svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
+{
+        struct svm_vcpu *vcpustate;
+
+        vcpustate = svm_get_vcpu(sc, vcpu);
+
+        vcpustate->dirty |= dirtybits;
+}
+
+#endif /* _SVM_SOFTC_H_ */

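The inline accessors above are how the rest of the SVM code reaches
per-vcpu state, and svm_set_dirty() records which VMCB clean-bit groups must
be invalidated before the next VMRUN. A sketch of the typical pairing; the
'asid' field update and the VMCB_CACHE_ASID constant come from vmcb.h, which
is not included in this truncated mail, so treat them as assumptions:

	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	ctrl->asid = newasid;				/* hypothetical update */
	svm_set_dirty(sc, vcpu, VMCB_CACHE_ASID);	/* reload on next VMRUN */
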
Copied: stable/10/sys/amd64/vmm/amd/svm_support.S (from r273375, head/sys/amd64/vmm/amd/svm_support.S)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ stable/10/sys/amd64/vmm/amd/svm_support.S	Tue Dec 30 08:24:14 2014	(r276403, copy of r273375, head/sys/amd64/vmm/amd/svm_support.S)
@@ -0,0 +1,115 @@
+/*-
+ * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <machine/asmacros.h>
+
+#include "svm_assym.h"
+
+/*
+ * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
+ *
+ * They are also responsible for saving/restoring the host %rbp across VMRUN.
+ */
+#define	VENTER  push %rbp ; mov %rsp,%rbp
+#define	VLEAVE  pop %rbp
+
+/*
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
+ * %rdi: physical address of VMCB
+ * %rsi: pointer to guest context
+ */
+ENTRY(svm_launch)
+	VENTER
+
+	/*
+	 * Host register state saved across a VMRUN.
+	 *
+	 * All "callee saved registers" except:
+	 * %rsp: because it is preserved by the processor across VMRUN.
+	 * %rbp: because it is saved/restored by the function prologue/epilogue.
+	 */
+	push %rbx
+	push %r12
+	push %r13
+	push %r14
+	push %r15
+
+	/* Save the physical address of the VMCB in %rax */
+	movq %rdi, %rax
+
+	push %rsi		/* push guest context pointer on the stack */
+
+	/*
+	 * Restore guest state.
+	 */
+	movq SCTX_R8(%rsi), %r8

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***


