From owner-svn-src-all@FreeBSD.ORG  Sat May 24 23:12:32 2014
Return-Path: 
Delivered-To: svn-src-all@freebsd.org
Received: from mx1.freebsd.org (mx1.freebsd.org [IPv6:2001:1900:2254:206a::19:1])
 (using TLSv1 with cipher ADH-AES256-SHA (256/256 bits))
 (No client certificate requested)
 by hub.freebsd.org (Postfix) with ESMTPS id 934BBF5A;
 Sat, 24 May 2014 23:12:32 +0000 (UTC)
Received: from svn.freebsd.org (svn.freebsd.org [IPv6:2001:1900:2254:2068::e6a:0])
 (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
 (Client did not present a certificate)
 by mx1.freebsd.org (Postfix) with ESMTPS id 73E9C265B;
 Sat, 24 May 2014 23:12:32 +0000 (UTC)
Received: from svn.freebsd.org ([127.0.1.70])
 by svn.freebsd.org (8.14.8/8.14.8) with ESMTP id s4ONCWnV066753;
 Sat, 24 May 2014 23:12:32 GMT (envelope-from neel@svn.freebsd.org)
Received: (from neel@localhost)
 by svn.freebsd.org (8.14.8/8.14.8/Submit) id s4ONCVdu066744;
 Sat, 24 May 2014 23:12:31 GMT (envelope-from neel@svn.freebsd.org)
Message-Id: <201405242312.s4ONCVdu066744@svn.freebsd.org>
From: Neel Natu
Date: Sat, 24 May 2014 23:12:31 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r266633 - in head: lib/libvmmapi sys/amd64/include sys/amd64/vmm usr.sbin/bhyve
X-SVN-Group: head
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
X-BeenThere: svn-src-all@freebsd.org
X-Mailman-Version: 2.1.18
Precedence: list
List-Id: "SVN commit messages for the entire src tree \(except for "user" and "projects"\)"
X-List-Received-Date: Sat, 24 May 2014 23:12:32 -0000

Author: neel
Date: Sat May 24 23:12:30 2014
New Revision: 266633
URL: http://svnweb.freebsd.org/changeset/base/266633

Log:
  Add libvmmapi functions vm_copyin() and vm_copyout() to copy into and out
  of the guest linear address space. These APIs in turn use a new ioctl
  'VM_GLA2GPA' to convert the guest linear address to guest physical.

  Use the new copyin/copyout APIs when emulating ins/outs instructions in
  bhyve(8).

Modified:
  head/lib/libvmmapi/vmmapi.c
  head/lib/libvmmapi/vmmapi.h
  head/sys/amd64/include/vmm.h
  head/sys/amd64/include/vmm_dev.h
  head/sys/amd64/vmm/vmm_dev.c
  head/sys/amd64/vmm/vmm_ioport.c
  head/usr.sbin/bhyve/inout.c
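
As an illustration of the new userspace API, here is a minimal, untested
sketch of a caller.  It assumes an already-open vmctx handle and a
struct vm_guest_paging taken from the current VM exit (for example
vmexit->u.inout_str.paging, as the bhyve change below does); the
read_guest_buf() wrapper and its error reporting are made up for this
example and are not part of libvmmapi.

#include <sys/types.h>

#include <machine/vmm.h>        /* struct vm_guest_paging */

#include <inttypes.h>
#include <stdio.h>
#include <vmmapi.h>

/*
 * Hypothetical helper: copy 'len' bytes from guest linear address 'gla'
 * into 'buf'.  vm_copyin() returns 0 on success, 1 if a page fault was
 * injected into the guest (restart the instruction and retry), and -1
 * if the linear address could not be translated.
 */
static int
read_guest_buf(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, void *buf, size_t len)
{
        int error;

        error = vm_copyin(ctx, vcpu, paging, gla, buf, len);
        if (error == -1)
                fprintf(stderr, "cannot translate gla %#" PRIx64 "\n", gla);
        else if (error == 1)
                fprintf(stderr, "fault injected, resume vcpu %d and retry\n",
                    vcpu);
        return (error);
}

A write in the other direction would use vm_copyout() with the same
0/1/-1 return convention.
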
Modified: head/lib/libvmmapi/vmmapi.c
==============================================================================
--- head/lib/libvmmapi/vmmapi.c    Sat May 24 22:50:58 2014    (r266632)
+++ head/lib/libvmmapi/vmmapi.c    Sat May 24 23:12:30 2014    (r266633)
@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -937,3 +938,88 @@ vm_get_hpet_capabilities(struct vmctx *c
 	*capabilities = cap.capabilities;
 	return (error);
 }
+
+static int
+vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, int *fault, uint64_t *gpa)
+{
+	struct vm_gla2gpa gg;
+	int error;
+
+	bzero(&gg, sizeof(struct vm_gla2gpa));
+	gg.vcpuid = vcpu;
+	gg.prot = prot;
+	gg.gla = gla;
+	gg.paging = *paging;
+
+	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
+	if (error == 0) {
+		*fault = gg.fault;
+		*gpa = gg.gpa;
+	}
+	return (error);
+}
+
+#ifndef min
+#define	min(a,b)	(((a) < (b)) ? (a) : (b))
+#endif
+
+int
+vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, void *vp, size_t len)
+{
+	char *dst;
+	const char *src;
+	uint64_t gpa;
+	int error, fault, n, off;
+
+	dst = vp;
+	while (len) {
+		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_READ,
+		    &fault, &gpa);
+		if (error)
+			return (-1);
+		if (fault)
+			return (1);
+
+		off = gpa & PAGE_MASK;
+		n = min(len, PAGE_SIZE - off);
+		src = vm_map_gpa(ctx, gpa, n);
+		bcopy(src, dst, n);
+
+		gla += n;
+		dst += n;
+		len -= n;
+	}
+	return (0);
+}
+
+int
+vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    const void *vp, uint64_t gla, size_t len)
+{
+	uint64_t gpa;
+	char *dst;
+	const char *src;
+	int error, fault, n, off;
+
+	src = vp;
+	while (len) {
+		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_WRITE,
+		    &fault, &gpa);
+		if (error)
+			return (-1);
+		if (fault)
+			return (1);
+
+		off = gpa & PAGE_MASK;
+		n = min(len, PAGE_SIZE - off);
+		dst = vm_map_gpa(ctx, gpa, n);
+		bcopy(src, dst, n);
+
+		gla += n;
+		src += n;
+		len -= n;
+	}
+	return (0);
+}

Modified: head/lib/libvmmapi/vmmapi.h
==============================================================================
--- head/lib/libvmmapi/vmmapi.h    Sat May 24 22:50:58 2014    (r266632)
+++ head/lib/libvmmapi/vmmapi.h    Sat May 24 23:12:30 2014    (r266633)
@@ -109,6 +109,11 @@ int	vm_set_x2apic_state(struct vmctx *ct
 
 int	vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
 
+int	vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+	    uint64_t gla_src, void *dst, size_t len);
+int	vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+	    const void *src, uint64_t gla_dst, size_t len);
+
 /* Reset vcpu register state */
 int	vcpu_reset(struct vmctx *ctx, int vcpu);
 

Modified: head/sys/amd64/include/vmm.h
==============================================================================
--- head/sys/amd64/include/vmm.h    Sat May 24 22:50:58 2014    (r266632)
+++ head/sys/amd64/include/vmm.h    Sat May 24 23:12:30 2014    (r266633)
@@ -427,7 +427,6 @@ struct vm_inout_str {
 	enum vm_reg_name seg_name;
 	struct seg_desc seg_desc;
 	uint64_t gla;	/* may be set to VIE_INVALID_GLA */
-	uint64_t gpa;
 };
 
 struct vm_exit {

Modified: head/sys/amd64/include/vmm_dev.h
==============================================================================
--- head/sys/amd64/include/vmm_dev.h    Sat May 24 22:50:58 2014    (r266632)
+++ head/sys/amd64/include/vmm_dev.h    Sat May 24 23:12:30 2014    (r266633)
@@ -168,6 +168,15 @@ struct vm_suspend {
 	enum vm_suspend_how how;
 };
 
+struct vm_gla2gpa {
+	int		vcpuid;		/* inputs */
+	int		prot;		/* PROT_READ or PROT_WRITE */
+	uint64_t	gla;
+	struct vm_guest_paging paging;
+	int		fault;		/* outputs */
+	uint64_t	gpa;
+};
+
 enum {
 	/* general routines */
 	IOCNUM_ABIVERS = 0,
@@ -180,6 +189,7 @@ enum {
 	IOCNUM_MAP_MEMORY = 10,
 	IOCNUM_GET_MEMORY_SEG = 11,
 	IOCNUM_GET_GPA_PMAP = 12,
+	IOCNUM_GLA2GPA = 13,
 
 	/* register/state accessors */
 	IOCNUM_SET_REGISTER = 20,
@@ -289,4 +299,6 @@ enum {
 	_IOR('v', IOCNUM_GET_HPET_CAPABILITIES, struct vm_hpet_cap)
 #define	VM_GET_GPA_PMAP \
 	_IOWR('v', IOCNUM_GET_GPA_PMAP, struct vm_gpa_pte)
+#define	VM_GLA2GPA	\
+	_IOWR('v', IOCNUM_GLA2GPA, struct vm_gla2gpa)
 #endif
Modified: head/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- head/sys/amd64/vmm/vmm_dev.c    Sat May 24 22:50:58 2014    (r266632)
+++ head/sys/amd64/vmm/vmm_dev.c    Sat May 24 23:12:30 2014    (r266633)
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 
 #include 
 #include 
+#include 
 #include 
 
 #include "vmm_lapic.h"
@@ -168,6 +169,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 	struct vm_x2apic *x2apic;
 	struct vm_gpa_pte *gpapte;
 	struct vm_suspend *vmsuspend;
+	struct vm_gla2gpa *gg;
 
 	sc = vmmdev_lookup2(cdev);
 	if (sc == NULL)
@@ -192,6 +194,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 	case VM_PPTDEV_MSI:
 	case VM_PPTDEV_MSIX:
 	case VM_SET_X2APIC_STATE:
+	case VM_GLA2GPA:
 		/*
 		 * XXX fragile, handle with care
 		 * Assumes that the first field of the ioctl data is the vcpu.
@@ -415,6 +418,27 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 	case VM_GET_HPET_CAPABILITIES:
 		error = vhpet_getcap((struct vm_hpet_cap *)data);
 		break;
+	case VM_GLA2GPA: {
+		CTASSERT(PROT_READ == VM_PROT_READ);
+		CTASSERT(PROT_WRITE == VM_PROT_WRITE);
+		CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
+		gg = (struct vm_gla2gpa *)data;
+		error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+		    gg->prot, &gg->gpa);
+		KASSERT(error == 0 || error == 1 || error == -1,
+		    ("%s: vmm_gla2gpa unknown error %d", __func__, error));
+		if (error >= 0) {
+			/*
+			 * error = 0: the translation was successful
+			 * error = 1: a fault was injected into the guest
+			 */
+			gg->fault = error;
+			error = 0;
+		} else {
+			error = EFAULT;
+		}
+		break;
+	}
 	default:
 		error = ENOTTY;
 		break;

Modified: head/sys/amd64/vmm/vmm_ioport.c
==============================================================================
--- head/sys/amd64/vmm/vmm_ioport.c    Sat May 24 22:50:58 2014    (r266632)
+++ head/sys/amd64/vmm/vmm_ioport.c    Sat May 24 23:12:30 2014    (r266633)
@@ -145,7 +145,7 @@ emulate_inout_str(struct vm *vm, int vcp
 {
 	struct vm_inout_str *vis;
 	uint64_t gla, index, segbase;
-	int error, in;
+	int in;
 
 	vis = &vmexit->u.inout_str;
 	in = vis->inout.in;
@@ -197,18 +197,8 @@ emulate_inout_str(struct vm *vm, int vcp
 	}
 	vis->gla = gla;
 
-	error = vmm_gla2gpa(vm, vcpuid, &vis->paging, gla,
-	    in ? VM_PROT_WRITE : VM_PROT_READ, &vis->gpa);
-	KASSERT(error == 0 || error == 1 || error == -1,
-	    ("%s: vmm_gla2gpa unexpected error %d", __func__, error));
-	if (error == -1) {
-		return (EFAULT);
-	} else if (error == 1) {
-		return (0);	/* Resume guest to handle page fault */
-	} else {
-		*retu = true;
-		return (0);	/* Return to userspace to finish emulation */
-	}
+	*retu = true;
+	return (0);	/* Return to userspace to finish emulation */
 }
 
 int
Modified: head/usr.sbin/bhyve/inout.c
==============================================================================
--- head/usr.sbin/bhyve/inout.c    Sat May 24 22:50:58 2014    (r266632)
+++ head/usr.sbin/bhyve/inout.c    Sat May 24 23:12:30 2014    (r266633)
@@ -102,14 +102,12 @@ int
 emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
 {
 	int addrsize, bytes, flags, in, port, rep;
-	uint64_t gpa, gpaend;
 	uint32_t val;
 	inout_func_t handler;
 	void *arg;
-	char *gva;
 	int error, retval;
 	enum vm_reg_name idxreg;
-	uint64_t index, count;
+	uint64_t gla, index, count;
 	struct vm_inout_str *vis;
 
 	bytes = vmexit->u.inout.bytes;
@@ -149,10 +147,6 @@ emulate_inout(struct vmctx *ctx, int vcp
 	/* Count register */
 	count = vis->count & vie_size2mask(addrsize);
 
-	gpa = vis->gpa;
-	gpaend = rounddown(gpa + PAGE_SIZE, PAGE_SIZE);
-	gva = paddr_guest2host(ctx, gpa, gpaend - gpa);
-
 	if (vie_alignment_check(vis->paging.cpl, bytes, vis->cr0,
 	    vis->rflags, vis->gla)) {
 		error = vm_inject_exception2(ctx, vcpu, IDT_AC, 0);
@@ -160,26 +154,34 @@ emulate_inout(struct vmctx *ctx, int vcp
 		return (INOUT_RESTART);
 	}
 
-	while (count != 0 && gpa < gpaend) {
-		/*
-		 * XXX this may not work for unaligned accesses because
-		 * the last access on the page may spill over into the
-		 * adjacent page in the linear address space. This is a
-		 * problem because we don't have a gla2gpa() mapping of
-		 * this adjacent page.
-		 */
-		assert(gpaend - gpa >= bytes);
-
+	gla = vis->gla;
+	while (count) {
 		val = 0;
-		if (!in)
-			bcopy(gva, &val, bytes);
+		if (!in) {
+			error = vm_copyin(ctx, vcpu, &vis->paging,
+			    gla, &val, bytes);
+			assert(error == 0 || error == 1 || error == -1);
+			if (error) {
+				retval = (error == 1) ? INOUT_RESTART :
+				    INOUT_ERROR;
+				break;
+			}
+		}
 
 		retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
 		if (retval != 0)
 			break;
 
-		if (in)
-			bcopy(&val, gva, bytes);
+		if (in) {
+			error = vm_copyout(ctx, vcpu, &vis->paging,
+			    &val, gla, bytes);
+			assert(error == 0 || error == 1 || error == -1);
+			if (error) {
+				retval = (error == 1) ? INOUT_RESTART :
+				    INOUT_ERROR;
+				break;
+			}
+		}
 
 		/* Update index */
 		if (vis->rflags & PSL_D)
@@ -188,8 +190,7 @@ emulate_inout(struct vmctx *ctx, int vcp
 			index += bytes;
 
 		count--;
-		gva += bytes;
-		gpa += bytes;
+		gla += bytes;
 	}
 
 	/* Update index register */