Date:      Fri, 6 Jun 2014 18:23:50 +0000 (UTC)
From:      Tycho Nightingale <tychon@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r267178 - in head/sys/amd64/vmm: intel io
Message-ID:  <201406061823.s56INoha075238@svn.freebsd.org>

Author: tychon
Date: Fri Jun  6 18:23:49 2014
New Revision: 267178
URL: http://svnweb.freebsd.org/changeset/base/267178

Log:
  Support guest accesses to %cr8.
  
  Reviewed by:	neel

Modified:
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/io/vlapic.c
  head/sys/amd64/vmm/io/vlapic.h
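
The conversion the new vmx_emulate_cr8_access() performs is the
architectural one: %cr8 exposes only the task-priority class, so
CR8[3:0] corresponds to TPR[7:4] and the low TPR nibble reads back as
zero.  The helpers below are an illustrative restatement of the << 4 /
>> 4 shifts in the diff; they are not part of the commit and the names
are invented here:

	static inline uint8_t
	cr8_to_tpr(uint64_t cr8val)
	{

		/* CR8 holds the priority class; it becomes TPR[7:4]. */
		return ((cr8val & 0xf) << 4);
	}

	static inline uint64_t
	tpr_to_cr8(uint8_t tpr)
	{

		/* Only the priority class is visible through %cr8. */
		return (tpr >> 4);
	}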

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Fri Jun  6 18:02:32 2014	(r267177)
+++ head/sys/amd64/vmm/intel/vmx.c	Fri Jun  6 18:23:49 2014	(r267178)
@@ -83,7 +83,9 @@ __FBSDID("$FreeBSD$");
 	(PROCBASED_SECONDARY_CONTROLS	|				\
 	 PROCBASED_IO_EXITING		|				\
 	 PROCBASED_MSR_BITMAPS		|				\
-	 PROCBASED_CTLS_WINDOW_SETTING)
+	 PROCBASED_CTLS_WINDOW_SETTING	|				\
+	 PROCBASED_CR8_LOAD_EXITING	|				\
+	 PROCBASED_CR8_STORE_EXITING)
 #define	PROCBASED_CTLS_ZERO_SETTING	\
 	(PROCBASED_CR3_LOAD_EXITING |	\
 	PROCBASED_CR3_STORE_EXITING |	\
@@ -714,6 +716,13 @@ vmx_init(int ipinum)
 		procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
 
 		/*
+		 * No need to emulate accesses to %CR8 if virtual
+		 * interrupt delivery is enabled.
+		 */
+		procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
+		procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
+
+		/*
 		 * Check for Posted Interrupts only if Virtual Interrupt
 		 * Delivery is enabled.
 		 */
@@ -1426,97 +1435,130 @@ vmx_emulate_xsetbv(struct vmx *vmx, int 
 	return (HANDLED);
 }
 
-static int
-vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+static uint64_t
+vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
 {
-	int cr, vmcs_guest_cr, vmcs_shadow_cr;
-	uint64_t crval, regval, ones_mask, zeros_mask;
 	const struct vmxctx *vmxctx;
 
-	/* We only handle mov to %cr0 or %cr4 at this time */
-	if ((exitqual & 0xf0) != 0x00)
-		return (UNHANDLED);
+	vmxctx = &vmx->ctx[vcpu];
 
-	cr = exitqual & 0xf;
-	if (cr != 0 && cr != 4)
-		return (UNHANDLED);
+	switch (ident) {
+	case 0:
+		return (vmxctx->guest_rax);
+	case 1:
+		return (vmxctx->guest_rcx);
+	case 2:
+		return (vmxctx->guest_rdx);
+	case 3:
+		return (vmxctx->guest_rbx);
+	case 4:
+		return (vmcs_read(VMCS_GUEST_RSP));
+	case 5:
+		return (vmxctx->guest_rbp);
+	case 6:
+		return (vmxctx->guest_rsi);
+	case 7:
+		return (vmxctx->guest_rdi);
+	case 8:
+		return (vmxctx->guest_r8);
+	case 9:
+		return (vmxctx->guest_r9);
+	case 10:
+		return (vmxctx->guest_r10);
+	case 11:
+		return (vmxctx->guest_r11);
+	case 12:
+		return (vmxctx->guest_r12);
+	case 13:
+		return (vmxctx->guest_r13);
+	case 14:
+		return (vmxctx->guest_r14);
+	case 15:
+		return (vmxctx->guest_r15);
+	default:
+		panic("invalid vmx register %d", ident);
+	}
+}
+
+static void
+vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
+{
+	struct vmxctx *vmxctx;
 
-	regval = 0; /* silence gcc */
 	vmxctx = &vmx->ctx[vcpu];
 
-	/*
-	 * We must use vmcs_write() directly here because vmcs_setreg() will
-	 * call vmclear(vmcs) as a side-effect which we certainly don't want.
-	 */
-	switch ((exitqual >> 8) & 0xf) {
+	switch (ident) {
 	case 0:
-		regval = vmxctx->guest_rax;
+		vmxctx->guest_rax = regval;
 		break;
 	case 1:
-		regval = vmxctx->guest_rcx;
+		vmxctx->guest_rcx = regval;
 		break;
 	case 2:
-		regval = vmxctx->guest_rdx;
+		vmxctx->guest_rdx = regval;
 		break;
 	case 3:
-		regval = vmxctx->guest_rbx;
+		vmxctx->guest_rbx = regval;
 		break;
 	case 4:
-		regval = vmcs_read(VMCS_GUEST_RSP);
+		vmcs_write(VMCS_GUEST_RSP, regval);
 		break;
 	case 5:
-		regval = vmxctx->guest_rbp;
+		vmxctx->guest_rbp = regval;
 		break;
 	case 6:
-		regval = vmxctx->guest_rsi;
+		vmxctx->guest_rsi = regval;
 		break;
 	case 7:
-		regval = vmxctx->guest_rdi;
+		vmxctx->guest_rdi = regval;
 		break;
 	case 8:
-		regval = vmxctx->guest_r8;
+		vmxctx->guest_r8 = regval;
 		break;
 	case 9:
-		regval = vmxctx->guest_r9;
+		vmxctx->guest_r9 = regval;
 		break;
 	case 10:
-		regval = vmxctx->guest_r10;
+		vmxctx->guest_r10 = regval;
 		break;
 	case 11:
-		regval = vmxctx->guest_r11;
+		vmxctx->guest_r11 = regval;
 		break;
 	case 12:
-		regval = vmxctx->guest_r12;
+		vmxctx->guest_r12 = regval;
 		break;
 	case 13:
-		regval = vmxctx->guest_r13;
+		vmxctx->guest_r13 = regval;
 		break;
 	case 14:
-		regval = vmxctx->guest_r14;
+		vmxctx->guest_r14 = regval;
 		break;
 	case 15:
-		regval = vmxctx->guest_r15;
+		vmxctx->guest_r15 = regval;
 		break;
+	default:
+		panic("invalid vmx register %d", ident);
 	}
+}
 
-	if (cr == 0) {
-		ones_mask = cr0_ones_mask;
-		zeros_mask = cr0_zeros_mask;
-		vmcs_guest_cr = VMCS_GUEST_CR0;
-		vmcs_shadow_cr = VMCS_CR0_SHADOW;
-	} else {
-		ones_mask = cr4_ones_mask;
-		zeros_mask = cr4_zeros_mask;
-		vmcs_guest_cr = VMCS_GUEST_CR4;
-		vmcs_shadow_cr = VMCS_CR4_SHADOW;
-	}
-	vmcs_write(vmcs_shadow_cr, regval);
-
-	crval = regval | ones_mask;
-	crval &= ~zeros_mask;
-	vmcs_write(vmcs_guest_cr, crval);
+static int
+vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	uint64_t crval, regval;
+
+	/* We only handle mov to %cr0 at this time */
+	if ((exitqual & 0xf0) != 0x00)
+		return (UNHANDLED);
 
-	if (cr == 0 && regval & CR0_PG) {
+	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+	vmcs_write(VMCS_CR0_SHADOW, regval);
+
+	crval = regval | cr0_ones_mask;
+	crval &= ~cr0_zeros_mask;
+	vmcs_write(VMCS_GUEST_CR0, crval);
+
+	if (regval & CR0_PG) {
 		uint64_t efer, entry_ctls;
 
 		/*
@@ -1537,6 +1579,48 @@ vmx_emulate_cr_access(struct vmx *vmx, i
 	return (HANDLED);
 }
 
+static int
+vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	uint64_t crval, regval;
+
+	/* We only handle mov to %cr4 at this time */
+	if ((exitqual & 0xf0) != 0x00)
+		return (UNHANDLED);
+
+	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+
+	vmcs_write(VMCS_CR4_SHADOW, regval);
+
+	crval = regval | cr4_ones_mask;
+	crval &= ~cr4_zeros_mask;
+	vmcs_write(VMCS_GUEST_CR4, crval);
+
+	return (HANDLED);
+}
+
+static int
+vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+{
+	uint64_t regval;
+
+	/* We only handle mov %cr8 to/from a register at this time. */
+	if ((exitqual & 0xe0) != 0x00) {
+		return (UNHANDLED);
+	}
+
+	if (exitqual & 0x10) {
+		regval = vlapic_get_tpr(vm_lapic(vmx->vm, vcpu));
+		vmx_set_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf,
+				  regval >> 4);
+	} else {
+		regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+		vlapic_set_tpr(vm_lapic(vmx->vm, vcpu), regval << 4);
+	}
+
+	return (HANDLED);
+}
+
 /*
  * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
  */
@@ -1929,7 +2013,17 @@ vmx_exit_process(struct vmx *vmx, int vc
 	switch (reason) {
 	case EXIT_REASON_CR_ACCESS:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
-		handled = vmx_emulate_cr_access(vmx, vcpu, qual);
+		switch (qual & 0xf) {
+		case 0:
+			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
+			break;
+		case 4:
+			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
+			break;
+		case 8:
+			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
+			break;
+		}
 		break;
 	case EXIT_REASON_RDMSR:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
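
The new dispatch in vmx_exit_process() keys off the VM-exit
qualification for control-register accesses.  Per the Intel SDM, bits
3:0 hold the control register number, bits 5:4 the access type (0 =
MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), and bits 11:8 the
general-purpose register operand for MOV.  A minimal decoding sketch,
with a struct and names invented here for illustration only:

	struct cr_exitqual {
		int	cr;		/* bits 3:0  - control register number */
		int	access_type;	/* bits 5:4  - 0 MOV to CR, 1 MOV from CR,
					 *             2 CLTS, 3 LMSW */
		int	reg;		/* bits 11:8 - GPR operand for MOV */
	};

	static void
	decode_cr_exitqual(uint64_t qual, struct cr_exitqual *eq)
	{

		eq->cr = qual & 0xf;
		eq->access_type = (qual >> 4) & 0x3;
		eq->reg = (qual >> 8) & 0xf;
	}

With that layout, the (exitqual & 0xe0) test in vmx_emulate_cr8_access()
rejects access types other than MOV to/from CR, and bit 4 then selects
between a CR8 read (TPR copied into the guest register) and a CR8 write
(guest register copied into the TPR).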

Modified: head/sys/amd64/vmm/io/vlapic.c
==============================================================================
--- head/sys/amd64/vmm/io/vlapic.c	Fri Jun  6 18:02:32 2014	(r267177)
+++ head/sys/amd64/vmm/io/vlapic.c	Fri Jun  6 18:23:49 2014	(r267178)
@@ -1184,7 +1184,7 @@ vlapic_read(struct vlapic *vlapic, int m
 			*data = lapic->version;
 			break;
 		case APIC_OFFSET_TPR:
-			*data = lapic->tpr;
+			*data = vlapic_get_tpr(vlapic);
 			break;
 		case APIC_OFFSET_APR:
 			*data = lapic->apr;
@@ -1305,8 +1305,7 @@ vlapic_write(struct vlapic *vlapic, int 
 			vlapic_id_write_handler(vlapic);
 			break;
 		case APIC_OFFSET_TPR:
-			lapic->tpr = data & 0xff;
-			vlapic_update_ppr(vlapic);
+			vlapic_set_tpr(vlapic, data & 0xff);
 			break;
 		case APIC_OFFSET_EOI:
 			vlapic_process_eoi(vlapic);
@@ -1611,3 +1610,20 @@ vlapic_set_tmr_level(struct vlapic *vlap
 	VLAPIC_CTR1(vlapic, "vector %d set to level-triggered", vector);
 	vlapic_set_tmr(vlapic, vector, true);
 }
+
+void
+vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+
+	lapic->tpr = val;
+	vlapic_update_ppr(vlapic);
+}
+
+uint8_t
+vlapic_get_tpr(struct vlapic *vlapic)
+{
+	struct LAPIC	*lapic = vlapic->apic_page;
+
+	return (lapic->tpr);
+}
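
vlapic_set_tpr() stores the new value and immediately calls
vlapic_update_ppr(), so the MMIO TPR register and the new %cr8 path in
vmx.c share a single update path and the PPR stays consistent however
the guest changes its task priority.  The recalculation matters because
the processor-priority register is derived from the TPR and the highest
in-service vector.  A sketch of that architectural rule follows; it is
not the vlapic_update_ppr() implementation, and isrv here is shorthand
for the highest in-service vector:

	static uint8_t
	ppr_from_tpr(uint8_t tpr, uint8_t isrv)
	{

		/* PPR is TPR unless an in-service vector outranks it. */
		if ((tpr & 0xf0) >= (isrv & 0xf0))
			return (tpr);
		return (isrv & 0xf0);
	}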

Modified: head/sys/amd64/vmm/io/vlapic.h
==============================================================================
--- head/sys/amd64/vmm/io/vlapic.h	Fri Jun  6 18:02:32 2014	(r267177)
+++ head/sys/amd64/vmm/io/vlapic.h	Fri Jun  6 18:23:49 2014	(r267178)
@@ -92,6 +92,9 @@ void vlapic_reset_tmr(struct vlapic *vla
 void vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
     int delmode, int vector);
 
+void vlapic_set_tpr(struct vlapic *vlapic, uint8_t val);
+uint8_t vlapic_get_tpr(struct vlapic *vlapic);
+
 /* APIC write handlers */
 void vlapic_id_write_handler(struct vlapic *vlapic);
 void vlapic_ldr_write_handler(struct vlapic *vlapic);


