Date:      Sun, 18 May 2014 03:50:17 +0000 (UTC)
From:      Peter Grehan <grehan@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r266390 - head/sys/amd64/vmm/intel
Message-ID:  <201405180350.s4I3oHkl048065@svn.freebsd.org>

Author: grehan
Date: Sun May 18 03:50:17 2014
New Revision: 266390
URL: http://svnweb.freebsd.org/changeset/base/266390

Log:
  Make the vmx asm code dtrace-fbt-friendly by
   - inserting frame enter/leave sequences
   - restructuring the vmx_enter_guest routine so that it subsumes
     the vm_exit_guest block, which was the #vmexit RIP and not a
     callable routine.
  
  Reviewed by:	neel
  MFC after:	3 weeks
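
With standard frame enter/leave sequences in place, DTrace's FBT provider can
pattern-match the prologue/epilogue of these routines and publish entry/return
probes for them. A minimal usage sketch, assuming the code is loaded as the
vmm kernel module (the 'vmm' probe module name and the script name are
assumptions, not part of this commit):

	/* vmx_lat.d: per-pass latency through vmx_enter_guest */
	fbt:vmm:vmx_enter_guest:entry
	{
		self->ts = timestamp;
	}

	fbt:vmm:vmx_enter_guest:return
	/self->ts/
	{
		@lat["vmx_enter_guest latency (ns)"] = quantize(timestamp - self->ts);
		self->ts = 0;
	}

Run with, for example, 'dtrace -s vmx_lat.d'.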

Modified:
  head/sys/amd64/vmm/intel/vmx.h
  head/sys/amd64/vmm/intel/vmx_support.S

Modified: head/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.h	Sun May 18 01:20:51 2014	(r266389)
+++ head/sys/amd64/vmm/intel/vmx.h	Sun May 18 03:50:17 2014	(r266390)
@@ -67,7 +67,7 @@ struct vmxctx {
 	int		inst_fail_status;
 
 	/*
-	 * The pmap needs to be deactivated in vmx_exit_guest()
+	 * The pmap needs to be deactivated in vmx_enter_guest()
 	 * so keep a copy of the 'pmap' in each vmxctx.
 	 */
 	struct pmap	*pmap;
@@ -121,10 +121,11 @@ CTASSERT((offsetof(struct vmx, pir_desc[
 #define	VMX_VMLAUNCH_ERROR	2
 #define	VMX_INVEPT_ERROR	3
 int	vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
-void	vmx_exit_guest(void);
 void	vmx_call_isr(uintptr_t entry);
 
 u_long	vmx_fix_cr0(u_long cr0);
 u_long	vmx_fix_cr4(u_long cr4);
 
+extern char	vmx_exit_guest[];
+
 #endif

Modified: head/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- head/sys/amd64/vmm/intel/vmx_support.S	Sun May 18 01:20:51 2014	(r266389)
+++ head/sys/amd64/vmm/intel/vmx_support.S	Sun May 18 03:50:17 2014	(r266390)
@@ -37,6 +37,10 @@
 #define	LK
 #endif
 
+/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
+#define VENTER  push %rbp ; mov %rsp,%rbp
+#define VLEAVE  pop %rbp
+
 /*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
@@ -98,6 +102,7 @@
  * Interrupts must be disabled on entry.
  */
 ENTRY(vmx_enter_guest)
+	VENTER
 	/*
 	 * Save host state before doing anything else.
 	 */
@@ -183,14 +188,17 @@ inst_error:
 	LK btrl	%r10d, PM_ACTIVE(%r11)
 
 	VMX_HOST_RESTORE
+	VLEAVE
 	ret
-END(vmx_enter_guest)
 
 /*
- * void vmx_exit_guest(void)
- * %rsp points to the struct vmxctx
+ * Non-error VM-exit from the guest. Make this a label so it can
+ * be used by C code when setting up the VMCS.
+ * The VMCS-restored %rsp points to the struct vmxctx
  */
-ENTRY(vmx_exit_guest)
+	ALIGN_TEXT
+	.globl	vmx_exit_guest
+vmx_exit_guest:
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
@@ -229,8 +237,9 @@ ENTRY(vmx_exit_guest)
 	 * value of VMX_GUEST_VMEXIT.
 	 */
 	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
 	ret
-END(vmx_exit_guest)
+END(vmx_enter_guest)
 
 /*
  * %rdi = interrupt handler entry point
@@ -239,6 +248,7 @@ END(vmx_exit_guest)
  * instruction in Intel SDM, Vol 2.
  */
 ENTRY(vmx_call_isr)
+	VENTER
 	mov	%rsp, %r11			/* save %rsp */
 	and	$~0xf, %rsp			/* align on 16-byte boundary */
 	pushq	$KERNEL_SS			/* %ss */
@@ -247,5 +257,6 @@ ENTRY(vmx_call_isr)
 	pushq	$KERNEL_CS			/* %cs */
 	cli					/* disable interrupts */
 	callq	*%rdi				/* push %rip and call isr */
+	VLEAVE
 	ret
 END(vmx_call_isr)
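
For reference, a hedged C-side sketch of how the vmx_exit_guest label exported
above can now be consumed when programming the VMCS: the host %rip field is
pointed at the label so that a VM-exit resumes execution inside
vmx_enter_guest's own frame. The helper name vmx_set_host_rip() is
hypothetical; vmcs_write() and VMCS_HOST_RIP are taken from intel/vmcs.h, and
the actual call site in vmx.c is not part of this diff.

	/* Sketch only: needs sys/types.h and intel/vmcs.h; assumes the	*/
	/* VMCS of interest is already current (loaded via VMPTRLD).	*/
	extern char vmx_exit_guest[];	/* label exported by vmx_support.S */

	static void
	vmx_set_host_rip(void)
	{
		/* On the next VM-exit, hardware loads %rip from this field. */
		vmcs_write(VMCS_HOST_RIP, (u_long)vmx_exit_guest);
	}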


