From: John Baldwin
Date: Sun, 18 May 2014 04:33:25 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r266393 - in stable/10: sys/amd64/include sys/amd64/vmm usr.sbin/bhyve
Message-Id: <201405180433.s4I4XPXI074567@svn.freebsd.org>

Author: jhb
Date: Sun May 18 04:33:24 2014
New Revision: 266393
URL: http://svnweb.freebsd.org/changeset/base/266393

Log:
  MFC 259737, 262646:
  Fix a couple of issues with vcpu state:
  - Add a parameter to 'vcpu_set_state()' to enforce that the vcpu is in the
    IDLE state before the requested state transition.  This guarantees that
    there is exactly one ioctl() operating on a vcpu at any point in time and
    prevents unintended state transitions.
  - Fix a race between VMRUN() and vcpu_notify_event() due to 'vcpu->hostcpu'
    being updated outside of the vcpu_lock().
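As a reference, the transition rules described above can be modeled in a few
lines of user-space C.  The demo_* names below are illustrative stand-ins
rather than the kernel's definitions, and the boolean return is a
simplification: when 'from_idle' is requested and the vcpu is not idle, the
kernel sleeps on the vcpu lock until the vcpu returns to VCPU_IDLE instead of
failing.

#include <stdbool.h>

/* Illustrative stand-ins for the kernel's vcpu states. */
enum demo_vcpu_state { DEMO_IDLE, DEMO_FROZEN, DEMO_RUNNING, DEMO_SLEEPING };

/*
 * Return true if 'cur' -> 'next' is one of the allowed transitions:
 *   IDLE -> FROZEN -> IDLE
 *   FROZEN -> RUNNING -> FROZEN
 *   FROZEN -> SLEEPING -> FROZEN
 * 'from_idle' mirrors the new vcpu_set_state() parameter: ioctl-driven
 * requests must start from IDLE, in-kernel requests must never start there.
 */
static bool
demo_transition_ok(enum demo_vcpu_state cur, enum demo_vcpu_state next,
    bool from_idle)
{
	/* ioctl-driven requests must start from IDLE ... */
	if (from_idle && cur != DEMO_IDLE)
		return (false);
	/* ... and in-kernel requests must never start from IDLE. */
	if (!from_idle && cur == DEMO_IDLE)
		return (false);

	switch (cur) {
	case DEMO_IDLE:
		return (next == DEMO_FROZEN);
	case DEMO_FROZEN:
		return (next == DEMO_IDLE || next == DEMO_RUNNING ||
		    next == DEMO_SLEEPING);
	case DEMO_RUNNING:
	case DEMO_SLEEPING:
		return (next == DEMO_FROZEN);
	default:
		return (false);
	}
}

The effect of the 'from_idle' gate is that at most one ioctl() at a time can
move a given vcpu out of IDLE; all in-kernel transitions (to RUNNING or
SLEEPING and back to FROZEN) happen while the vcpu is already held out of
IDLE.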
Modified:
  stable/10/sys/amd64/include/vmm.h
  stable/10/sys/amd64/vmm/vmm.c
  stable/10/sys/amd64/vmm/vmm_dev.c
  stable/10/usr.sbin/bhyve/bhyverun.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/amd64/include/vmm.h
==============================================================================
--- stable/10/sys/amd64/include/vmm.h	Sun May 18 04:21:12 2014	(r266392)
+++ stable/10/sys/amd64/include/vmm.h	Sun May 18 04:33:24 2014	(r266393)
@@ -177,7 +177,8 @@ enum vcpu_state {
 	VCPU_SLEEPING,
 };
 
-int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state);
+int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
+    bool from_idle);
 enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
 
 static int __inline

Modified: stable/10/sys/amd64/vmm/vmm.c
==============================================================================
--- stable/10/sys/amd64/vmm/vmm.c	Sun May 18 04:21:12 2014	(r266392)
+++ stable/10/sys/amd64/vmm/vmm.c	Sun May 18 04:33:24 2014	(r266393)
@@ -837,13 +837,35 @@ save_guest_fpustate(struct vcpu *vcpu)
 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
 
 static int
-vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
+vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
+    bool from_idle)
 {
 	int error;
 
 	vcpu_assert_locked(vcpu);
 
 	/*
+	 * State transitions from the vmmdev_ioctl() must always begin from
+	 * the VCPU_IDLE state. This guarantees that there is only a single
+	 * ioctl() operating on a vcpu at any point.
+	 */
+	if (from_idle) {
+		while (vcpu->state != VCPU_IDLE)
+			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
+	} else {
+		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
+		    "vcpu idle state"));
+	}
+
+	if (vcpu->state == VCPU_RUNNING) {
+		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
+		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
+	} else {
+		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
+		    "vcpu that is not running", vcpu->hostcpu));
+	}
+
+	/*
 	 * The following state transitions are allowed:
 	 * IDLE -> FROZEN -> IDLE
 	 * FROZEN -> RUNNING -> FROZEN
@@ -863,12 +885,19 @@ vcpu_set_state_locked(struct vcpu *vcpu,
 		break;
 	}
 
-	if (error == 0)
-		vcpu->state = newstate;
+	if (error)
+		return (EBUSY);
+
+	vcpu->state = newstate;
+	if (newstate == VCPU_RUNNING)
+		vcpu->hostcpu = curcpu;
 	else
-		error = EBUSY;
+		vcpu->hostcpu = NOCPU;
 
-	return (error);
+	if (newstate == VCPU_IDLE)
+		wakeup(&vcpu->state);
+
+	return (0);
 }
 
 static void
@@ -876,7 +905,7 @@ vcpu_require_state(struct vm *vm, int vc
 {
 	int error;
 
-	if ((error = vcpu_set_state(vm, vcpuid, newstate)) != 0)
+	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
 		panic("Error %d setting state to %d\n", error, newstate);
 }
 
@@ -885,7 +914,7 @@ vcpu_require_state_locked(struct vcpu *v
 {
 	int error;
 
-	if ((error = vcpu_set_state_locked(vcpu, newstate)) != 0)
+	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
 		panic("Error %d setting state to %d", error, newstate);
 }
 
@@ -1131,9 +1160,7 @@ restart:
 	restore_guest_fpustate(vcpu);
 
 	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
-	vcpu->hostcpu = curcpu;
 	error = VMRUN(vm->cookie, vcpuid, rip, pmap, &vm->rendezvous_func);
-	vcpu->hostcpu = NOCPU;
 	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
 
 	save_guest_fpustate(vcpu);
@@ -1343,7 +1370,8 @@ vm_iommu_domain(struct vm *vm)
 }
 
 int
-vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
+vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
+    bool from_idle)
 {
 	int error;
 	struct vcpu *vcpu;
@@ -1354,7 +1382,7 @@ vcpu_set_state(struct vm *vm, int vcpuid
 	vcpu = &vm->vcpu[vcpuid];
 
 	vcpu_lock(vcpu);
-	error = vcpu_set_state_locked(vcpu, newstate);
+	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
 	vcpu_unlock(vcpu);
 
 	return (error);
@@ -1475,19 +1503,28 @@ vcpu_notify_event(struct vm *vm, int vcp
 
 	vcpu_lock(vcpu);
 	hostcpu = vcpu->hostcpu;
-	if (hostcpu == NOCPU) {
-		if (vcpu->state == VCPU_SLEEPING)
-			wakeup_one(vcpu);
-	} else {
-		if (vcpu->state != VCPU_RUNNING)
-			panic("invalid vcpu state %d", vcpu->state);
+	if (vcpu->state == VCPU_RUNNING) {
+		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
 		if (hostcpu != curcpu) {
-			if (lapic_intr)
+			if (lapic_intr) {
 				vlapic_post_intr(vcpu->vlapic, hostcpu,
 				    vmm_ipinum);
-			else
+			} else {
 				ipi_cpu(hostcpu, vmm_ipinum);
+			}
+		} else {
+			/*
+			 * If the 'vcpu' is running on 'curcpu' then it must
+			 * be sending a notification to itself (e.g. SELF_IPI).
+			 * The pending event will be picked up when the vcpu
+			 * transitions back to guest context.
+			 */
 		}
+	} else {
+		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
+		    "with hostcpu %d", vcpu->state, hostcpu));
+		if (vcpu->state == VCPU_SLEEPING)
+			wakeup_one(vcpu);
 	}
 	vcpu_unlock(vcpu);
 }

Modified: stable/10/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- stable/10/sys/amd64/vmm/vmm_dev.c	Sun May 18 04:21:12 2014	(r266392)
+++ stable/10/sys/amd64/vmm/vmm_dev.c	Sun May 18 04:33:24 2014	(r266393)
@@ -197,7 +197,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 			goto done;
 		}
 
-		error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN);
+		error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
 		if (error)
 			goto done;
 
@@ -214,14 +214,14 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 		 */
 		error = 0;
 		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
-			error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN);
+			error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
 			if (error)
 				break;
 		}
 
 		if (error) {
 			while (--vcpu >= 0)
-				vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
+				vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
 			goto done;
 		}
 
@@ -387,10 +387,10 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 	}
 
 	if (state_changed == 1) {
-		vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
+		vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
 	} else if (state_changed == 2) {
 		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
-			vcpu_set_state(sc->vm, vcpu, VCPU_IDLE);
+			vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
 	}
 
 done:

Modified: stable/10/usr.sbin/bhyve/bhyverun.c
==============================================================================
--- stable/10/usr.sbin/bhyve/bhyverun.c	Sun May 18 04:21:12 2014	(r266392)
+++ stable/10/usr.sbin/bhyve/bhyverun.c	Sun May 18 04:33:24 2014	(r266393)
@@ -493,19 +493,8 @@ vm_loop(struct vmctx *ctx, int vcpu, uin
 
 	while (1) {
 		error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
-		if (error != 0) {
-			/*
-			 * It is possible that 'vmmctl' or some other process
-			 * has transitioned the vcpu to CANNOT_RUN state right
-			 * before we tried to transition it to RUNNING.
-			 *
-			 * This is expected to be temporary so just retry.
-			 */
-			if (errno == EBUSY)
-				continue;
-			else
-				break;
-		}
+		if (error != 0)
+			break;
 
 		prevcpu = vcpu;
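On the hostcpu race, the essential property of the vmm.c change is that
'vcpu->state' and 'vcpu->hostcpu' now change together while the vcpu lock is
held, so vcpu_notify_event() can no longer observe a running vcpu with a
stale hostcpu (the case the old code turned into a panic).  A rough
user-space model of that invariant, using pthreads and made-up model_* names
rather than the kernel's code:

#include <pthread.h>

#define	MODEL_NOCPU	(-1)

enum model_state { MODEL_IDLE, MODEL_FROZEN, MODEL_RUNNING, MODEL_SLEEPING };

struct model_vcpu {
	pthread_mutex_t	 mtx;
	enum model_state state;
	int		 hostcpu;	/* only meaningful while RUNNING */
};

/* Transition helper: state and hostcpu change together, under one lock. */
static void
model_set_state(struct model_vcpu *vcpu, enum model_state newstate, int curcpu)
{
	pthread_mutex_lock(&vcpu->mtx);
	vcpu->state = newstate;
	vcpu->hostcpu = (newstate == MODEL_RUNNING) ? curcpu : MODEL_NOCPU;
	pthread_mutex_unlock(&vcpu->mtx);
}

/* Notifier: the (state, hostcpu) pair read here can never be torn. */
static int
model_notify(struct model_vcpu *vcpu)
{
	int target;

	pthread_mutex_lock(&vcpu->mtx);
	target = (vcpu->state == MODEL_RUNNING) ? vcpu->hostcpu : MODEL_NOCPU;
	pthread_mutex_unlock(&vcpu->mtx);
	return (target);	/* cpu to IPI, or MODEL_NOCPU to wake/skip */
}

In this model the thread entering the guest calls model_set_state() with
MODEL_RUNNING before running and MODEL_FROZEN afterwards, and a notifier
either IPIs the cpu returned by model_notify() or falls back to a wakeup,
which mirrors how the diff moves the hostcpu updates out of the VMRUN() path
and into vcpu_set_state_locked().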