From: Peter Grehan
Date: Sun, 18 May 2014 03:50:17 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r266390 - head/sys/amd64/vmm/intel
Message-Id: <201405180350.s4I3oHkl048065@svn.freebsd.org>

Author: grehan
Date: Sun May 18 03:50:17 2014
New Revision: 266390
URL: http://svnweb.freebsd.org/changeset/base/266390

Log:
  Make the vmx asm code dtrace-fbt-friendly by:
  - inserting frame enter/leave sequences
  - restructuring the vmx_enter_guest routine so that it subsumes the
    vmx_exit_guest block, which was the #vmexit RIP and not a callable
    routine.

  Reviewed by:	neel
  MFC after:	3 weeks

Modified:
  head/sys/amd64/vmm/intel/vmx.h
  head/sys/amd64/vmm/intel/vmx_support.S

Modified: head/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.h	Sun May 18 01:20:51 2014	(r266389)
+++ head/sys/amd64/vmm/intel/vmx.h	Sun May 18 03:50:17 2014	(r266390)
@@ -67,7 +67,7 @@ struct vmxctx {
 	int	inst_fail_status;
 
 	/*
-	 * The pmap needs to be deactivated in vmx_exit_guest()
+	 * The pmap needs to be deactivated in vmx_enter_guest()
	 * so keep a copy of the 'pmap' in each vmxctx.
	 */
 	struct pmap *pmap;
@@ -121,10 +121,11 @@ CTASSERT((offsetof(struct vmx, pir_desc[
 #define	VMX_VMLAUNCH_ERROR	2
 #define	VMX_INVEPT_ERROR	3
 int	vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched);
-void	vmx_exit_guest(void);
 void	vmx_call_isr(uintptr_t entry);
 
 u_long	vmx_fix_cr0(u_long cr0);
 u_long	vmx_fix_cr4(u_long cr4);
 
+extern char	vmx_exit_guest[];
+
 #endif
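
The vmx.h half of the change deserves a note: vmx_exit_guest is no longer a
callable function, so its prototype is dropped and the symbol is re-exported
as a bare label (extern char vmx_exit_guest[]) so that C code can still take
its address when it programs the VMCS host %rip. A minimal sketch of that
usage follows; vmcs_write() and the VMCS_HOST_RIP encoding are placeholders
standing in for whatever accessor the surrounding vmm code uses, and are not
part of this commit.

/*
 * Sketch only: consume the exported label from C.  Only the extern
 * declaration comes from r266390; everything else is illustrative.
 */
#include <stdint.h>

#define	VMCS_HOST_RIP	0x6c16U			/* placeholder encoding */

extern char	vmx_exit_guest[];		/* label in vmx_support.S */

void	vmcs_write(uint32_t encoding, uint64_t val);	/* placeholder accessor */

static void
vmx_set_host_rip(void)
{
	/* The label name decays to its address, just as the old function did. */
	vmcs_write(VMCS_HOST_RIP, (uint64_t)(uintptr_t)vmx_exit_guest);
}
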
Modified: head/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- head/sys/amd64/vmm/intel/vmx_support.S	Sun May 18 01:20:51 2014	(r266389)
+++ head/sys/amd64/vmm/intel/vmx_support.S	Sun May 18 03:50:17 2014	(r266390)
@@ -37,6 +37,10 @@
 #define	LK
 #endif
 
+/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
+#define	VENTER	push %rbp ; mov %rsp,%rbp
+#define	VLEAVE	pop %rbp
+
 /*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
@@ -98,6 +102,7 @@
  * Interrupts must be disabled on entry.
  */
 ENTRY(vmx_enter_guest)
+	VENTER
 	/*
	 * Save host state before doing anything else.
	 */
@@ -183,14 +188,17 @@ inst_error:
 	LK btrl	%r10d, PM_ACTIVE(%r11)
 
 	VMX_HOST_RESTORE
+	VLEAVE
 	ret
-END(vmx_enter_guest)
 
 /*
- * void vmx_exit_guest(void)
- * %rsp points to the struct vmxctx
+ * Non-error VM-exit from the guest. Make this a label so it can
+ * be used by C code when setting up the VMCS.
+ * The VMCS-restored %rsp points to the struct vmxctx
  */
-ENTRY(vmx_exit_guest)
+	ALIGN_TEXT
+	.globl	vmx_exit_guest
+vmx_exit_guest:
 	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
@@ -229,8 +237,9 @@ ENTRY(vmx_exit_guest)
	 * value of VMX_GUEST_VMEXIT.
	 */
 	movl	$VMX_GUEST_VMEXIT, %eax
+	VLEAVE
 	ret
-END(vmx_exit_guest)
+END(vmx_enter_guest)
 
 /*
  * %rdi = interrupt handler entry point
@@ -239,6 +248,7 @@ END(vmx_exit_guest)
  * instruction in Intel SDM, Vol 2.
  */
 ENTRY(vmx_call_isr)
+	VENTER
 	mov	%rsp, %r11		/* save %rsp */
 	and	$~0xf, %rsp		/* align on 16-byte boundary */
 	pushq	$KERNEL_SS		/* %ss */
@@ -247,5 +257,6 @@ ENTRY(vmx_call_isr)
 	pushq	$KERNEL_CS		/* %cs */
 	cli				/* disable interrupts */
 	callq	*%rdi			/* push %rip and call isr */
+	VLEAVE
 	ret
 END(vmx_call_isr)
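
Taken together, the vmx_support.S changes leave vmx_enter_guest as a single
FBT-visible function with two return paths: the instruction-error path and
the normal #vmexit path, which now falls inside the same END() marker and
pops the same frame. A rough C-level analogue of that control flow, purely
illustrative and using hypothetical names, looks like this:

/*
 * Illustrative analogue only; these are not real vmm symbols.  The point
 * is that both exit paths return through the same epilogue, so an FBT
 * return probe on vmx_enter_guest fires for either outcome.
 */
#define	GUEST_VMEXIT	0		/* stand-in for VMX_GUEST_VMEXIT */

struct vmxctx_sketch {
	int	inst_fail_status;	/* mirrors vmxctx.inst_fail_status */
};

static int
vmx_enter_guest_sketch(struct vmxctx_sketch *ctx, int launch_failed)
{
	if (launch_failed) {
		/* "inst_error" path: VMLAUNCH/VMRESUME did not enter the guest. */
		return (ctx->inst_fail_status);
	}
	/*
	 * Otherwise execution reappears at the vmx_exit_guest label after a
	 * #vmexit and returns through the same frame teardown.
	 */
	return (GUEST_VMEXIT);
}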