From: Peter Grehan <grehan@FreeBSD.org>
Date: Thu, 25 Apr 2013 04:56:44 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r249879 - in head/sys/amd64: include vmm vmm/intel

Author: grehan
Date: Thu Apr 25 04:56:43 2013
New Revision: 249879
URL: http://svnweb.freebsd.org/changeset/base/249879

Log:
  Add RIP-relative addressing to the instruction decoder.

  Rework the guest register fetch code to allow the RIP to be extracted
  from the VMCS while the kernel decoder is functioning.

  Hit by the OpenBSD local-apic code.
  Submitted by:	neel
  Reviewed by:	grehan
  Obtained from:	NetApp

Modified:
  head/sys/amd64/include/vmm.h
  head/sys/amd64/vmm/intel/vmcs.c
  head/sys/amd64/vmm/intel/vmcs.h
  head/sys/amd64/vmm/intel/vmx.c
  head/sys/amd64/vmm/vmm.c
  head/sys/amd64/vmm/vmm_instruction_emul.c

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/include/vmm.h	Thu Apr 25 04:56:43 2013	(r249879)
@@ -135,12 +135,12 @@ enum vcpu_state {
 };
 
 int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state);
-enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu);
+enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
 
 static int __inline
-vcpu_is_running(struct vm *vm, int vcpu)
+vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
 {
-	return (vcpu_get_state(vm, vcpu) == VCPU_RUNNING);
+	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
 }
 
 void *vcpu_stats(struct vm *vm, int vcpu);

Modified: head/sys/amd64/vmm/intel/vmcs.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmcs.c	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/vmm/intel/vmcs.c	Thu Apr 25 04:56:43 2013	(r249879)
@@ -174,7 +174,7 @@ vmcs_seg_desc_encoding(int seg, uint32_t
 }
 
 int
-vmcs_getreg(struct vmcs *vmcs, int ident, uint64_t *retval)
+vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *retval)
 {
 	int error;
 	uint32_t encoding;
@@ -194,14 +194,19 @@ vmcs_getreg(struct vmcs *vmcs, int ident
 	if (encoding == (uint32_t)-1)
 		return (EINVAL);
 
-	VMPTRLD(vmcs);
+	if (!running)
+		VMPTRLD(vmcs);
+
 	error = vmread(encoding, retval);
-	VMCLEAR(vmcs);
+
+	if (!running)
+		VMCLEAR(vmcs);
+
 	return (error);
 }
 
 int
-vmcs_setreg(struct vmcs *vmcs, int ident, uint64_t val)
+vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val)
 {
 	int error;
 	uint32_t encoding;
@@ -216,9 +221,14 @@ vmcs_setreg(struct vmcs *vmcs, int ident
 
 	val = vmcs_fix_regval(encoding, val);
 
-	VMPTRLD(vmcs);
+	if (!running)
+		VMPTRLD(vmcs);
+
 	error = vmwrite(encoding, val);
-	VMCLEAR(vmcs);
+
+	if (!running)
+		VMCLEAR(vmcs);
+
 	return (error);
 }

Modified: head/sys/amd64/vmm/intel/vmcs.h
==============================================================================
--- head/sys/amd64/vmm/intel/vmcs.h	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/vmm/intel/vmcs.h	Thu Apr 25 04:56:43 2013	(r249879)
@@ -52,8 +52,8 @@ int	vmcs_set_defaults(struct vmcs *vmcs,
 		      uint32_t procbased_ctls2, uint32_t exit_ctls,
 		      uint32_t entry_ctls, u_long msr_bitmap,
 		      uint16_t vpid);
-int	vmcs_getreg(struct vmcs *vmcs, int ident, uint64_t *retval);
-int	vmcs_setreg(struct vmcs *vmcs, int ident, uint64_t val);
+int	vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv);
+int	vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val);
 int	vmcs_getdesc(struct vmcs *vmcs, int ident,
 		     struct seg_desc *desc);
 int	vmcs_setdesc(struct vmcs *vmcs, int ident,

Modified: head/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- head/sys/amd64/vmm/intel/vmx.c	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/vmm/intel/vmx.c	Thu Apr 25 04:56:43 2013	(r249879)
@@ -667,11 +667,11 @@ vmx_setup_cr_shadow(int which, struct vm
 		shadow_value = cr4_ones_mask;
 	}
 
-	error = vmcs_setreg(vmcs, VMCS_IDENT(mask_ident), mask_value);
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
 	if (error)
 		return (error);
 
-	error = vmcs_setreg(vmcs, VMCS_IDENT(shadow_ident), shadow_value);
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), shadow_value);
 	if (error)
 		return (error);
 
@@ -1617,49 +1617,34 @@ vmxctx_setreg(struct vmxctx *vmxctx, int
 static int
 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
 {
+	int running, hostcpu;
 	struct vmx *vmx = arg;
 
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
+
 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
 		return (0);
 
-	/*
-	 * If the vcpu is running then don't mess with the VMCS.
-	 *
-	 * vmcs_getreg will VMCLEAR the vmcs when it is done which will cause
-	 * the subsequent vmlaunch/vmresume to fail.
-	 */
-	if (vcpu_is_running(vmx->vm, vcpu))
-		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
-
-	return (vmcs_getreg(&vmx->vmcs[vcpu], reg, retval));
+	return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
 }
 
 static int
 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 {
-	int error;
+	int error, hostcpu, running;
 	uint64_t ctls;
 	struct vmx *vmx = arg;
 
-	/*
-	 * XXX Allow caller to set contents of the guest registers saved in
-	 * the 'vmxctx' even though the vcpu might be running. We need this
-	 * specifically to support the rdmsr emulation that will set the
-	 * %eax and %edx registers during vm exit processing.
-	 */
+	running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+	if (running && hostcpu != curcpu)
+		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
+
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
-	/*
-	 * If the vcpu is running then don't mess with the VMCS.
-	 *
-	 * vmcs_setreg will VMCLEAR the vmcs when it is done which will cause
-	 * the subsequent vmlaunch/vmresume to fail.
-	 */
-	if (vcpu_is_running(vmx->vm, vcpu))
-		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
-
-	error = vmcs_setreg(&vmx->vmcs[vcpu], reg, val);
+	error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
 
 	if (error == 0) {
 		/*
@@ -1669,13 +1654,13 @@ vmx_setreg(void *arg, int vcpu, int reg,
 		 */
 		if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
 		    (reg == VM_REG_GUEST_EFER)) {
-			vmcs_getreg(&vmx->vmcs[vcpu],
+			vmcs_getreg(&vmx->vmcs[vcpu], running,
 				    VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
 			if (val & EFER_LMA)
 				ctls |= VM_ENTRY_GUEST_LMA;
 			else
 				ctls &= ~VM_ENTRY_GUEST_LMA;
-			vmcs_setreg(&vmx->vmcs[vcpu],
+			vmcs_setreg(&vmx->vmcs[vcpu], running,
 				    VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
 		}
 	}
@@ -1722,7 +1707,7 @@ vmx_inject(void *arg, int vcpu, int type
 	 * If there is already an exception pending to be delivered to the
 	 * vcpu then just return.
 	 */
-	error = vmcs_getreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
+	error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
 	if (error)
 		return (error);
 
@@ -1731,12 +1716,12 @@ vmx_inject(void *arg, int vcpu, int type
 
 	info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
 	info |= VMCS_INTERRUPTION_INFO_VALID;
-	error = vmcs_setreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
+	error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
 	if (error != 0)
 		return (error);
 
 	if (code_valid) {
-		error = vmcs_setreg(vmcs,
+		error = vmcs_setreg(vmcs, 0,
 				    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
 				    code);
 	}

Modified: head/sys/amd64/vmm/vmm.c
==============================================================================
--- head/sys/amd64/vmm/vmm.c	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/vmm/vmm.c	Thu Apr 25 04:56:43 2013	(r249879)
@@ -894,7 +894,7 @@ vcpu_set_state(struct vm *vm, int vcpuid
 }
 
 enum vcpu_state
-vcpu_get_state(struct vm *vm, int vcpuid)
+vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
 {
 	struct vcpu *vcpu;
 	enum vcpu_state state;
@@ -906,6 +906,8 @@ vcpu_get_state(struct vm *vm, int vcpuid
 
 	vcpu_lock(vcpu);
 	state = vcpu->state;
+	if (hostcpu != NULL)
+		*hostcpu = vcpu->hostcpu;
 	vcpu_unlock(vcpu);
 
 	return (state);

Modified: head/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- head/sys/amd64/vmm/vmm_instruction_emul.c	Thu Apr 25 04:53:01 2013	(r249878)
+++ head/sys/amd64/vmm/vmm_instruction_emul.c	Thu Apr 25 04:56:43 2013	(r249879)
@@ -50,7 +50,10 @@ __FBSDID("$FreeBSD$");
 #include
 #endif	/* _KERNEL */
 
-
+enum cpu_mode {
+	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
+	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
+};
 
 /* struct vie_op.op_type */
 enum {
@@ -133,32 +136,10 @@ static uint64_t size2mask[] = {
 };
 
 static int
-vie_valid_register(enum vm_reg_name reg)
-{
-#ifdef _KERNEL
-	/*
-	 * XXX
-	 * The operand register in which we store the result of the
-	 * read must be a GPR that we can modify even if the vcpu
-	 * is "running". All the GPRs qualify except for %rsp.
-	 *
-	 * This is a limitation of the vm_set_register() API
-	 * and can be fixed if necessary.
-	 */
-	if (reg == VM_REG_GUEST_RSP)
-		return (0);
-#endif
-	return (1);
-}
-
-static int
 vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
 {
 	int error;
 
-	if (!vie_valid_register(reg))
-		return (EINVAL);
-
 	error = vm_get_register(vm, vcpuid, reg, rval);
 
 	return (error);
@@ -196,9 +177,6 @@ vie_read_bytereg(void *vm, int vcpuid, s
 		}
 	}
 
-	if (!vie_valid_register(reg))
-		return (EINVAL);
-
 	error = vm_get_register(vm, vcpuid, reg, &val);
 	*rval = val >> rshift;
 	return (error);
@@ -211,9 +189,6 @@ vie_update_register(void *vm, int vcpuid
 	int error;
 	uint64_t origval;
 
-	if (!vie_valid_register(reg))
-		return (EINVAL);
-
 	switch (size) {
 	case 1:
 	case 2:
@@ -583,13 +558,16 @@ decode_opcode(struct vie *vie)
 	return (0);
 }
 
-/*
- * XXX assuming 32-bit or 64-bit guest
- */
 static int
 decode_modrm(struct vie *vie)
 {
 	uint8_t x;
+	enum cpu_mode cpu_mode;
+
+	/*
+	 * XXX assuming that guest is in IA-32E 64-bit mode
+	 */
+	cpu_mode = CPU_MODE_64BIT;
 
 	if (vie_peek(vie, &x))
 		return (-1);
@@ -642,7 +620,18 @@ decode_modrm(struct vie *vie)
 	case VIE_MOD_INDIRECT:
 		if (vie->rm == VIE_RM_DISP32) {
 			vie->disp_bytes = 4;
-			vie->base_register = VM_REG_LAST;	/* no base */
+			/*
+			 * Table 2-7. RIP-Relative Addressing
+			 *
+			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
+			 * whereas in compatibility mode it just implies disp32.
+			 */
+
+			if (cpu_mode == CPU_MODE_64BIT)
+				vie->base_register = VM_REG_GUEST_RIP;
+			else
+				vie->base_register = VM_REG_LAST;
+
 		}
 		break;
 	}
@@ -812,6 +801,13 @@ verify_gla(struct vm *vm, int cpuid, uin
 			    error, vie->base_register);
 			return (-1);
 		}
+
+		/*
+		 * RIP-relative addressing starts from the following
+		 * instruction
+		 */
+		if (vie->base_register == VM_REG_GUEST_RIP)
+			base += vie->num_valid;
 	}
 
 	idx = 0;
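
For readers without the Intel SDM at hand: with mod=00/r/m=101 in 64-bit mode
the effective address is the RIP of the *next* instruction plus the
sign-extended disp32, which is why verify_gla() adds vie->num_valid (the
number of instruction bytes that were fetched for decoding) to the base taken
from VM_REG_GUEST_RIP. Below is a minimal userland sketch of that calculation
only; the function name rip_relative_gla() and the sample values are purely
illustrative and are not part of this commit or of the vmm API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: effective address of a mod=00, r/m=101 memory operand
 * in 64-bit mode (Intel SDM Table 2-7, RIP-relative addressing).  The base
 * is the RIP of the instruction *following* the one being decoded, i.e. the
 * RIP at which the instruction starts plus its length, to which the
 * sign-extended disp32 is added.
 */
static uint64_t
rip_relative_gla(uint64_t rip, uint8_t inst_len, int32_t disp32)
{

	return (rip + inst_len + (int64_t)disp32);
}

int
main(void)
{
	/* A 6-byte "mov 0x10(%rip),%eax" fetched at a sample guest RIP. */
	uint64_t gla;

	gla = rip_relative_gla(0xfee00300UL, 6, 0x10);
	printf("gla = 0x%" PRIx64 "\n", gla);
	return (0);
}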