From owner-svn-src-all@FreeBSD.ORG Sun Jun 22 08:30:44 2014
From: Konstantin Belousov <kib@svn.freebsd.org>
Date: Sun, 22 Jun 2014 08:30:44 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r267713 - stable/10/sys/kern
Message-Id: <201406220830.s5M8Ui6j009555@svn.freebsd.org>
X-SVN-Group: stable-10

Author: kib
Date: Sun Jun 22 08:30:43 2014
New Revision: 267713
URL: http://svnweb.freebsd.org/changeset/base/267713

Log:
  MFC r267255:
  Change the nblock mutex to rwlock.

  MFC r267264:
  Devolatile as needed.

Modified:
  stable/10/sys/kern/vfs_bio.c

Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/kern/vfs_bio.c
==============================================================================
--- stable/10/sys/kern/vfs_bio.c	Sun Jun 22 06:54:36 2014	(r267712)
+++ stable/10/sys/kern/vfs_bio.c	Sun Jun 22 08:30:43 2014	(r267713)
@@ -251,7 +251,7 @@ static struct mtx_padalign rbreqlock;
 /*
  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  */
-static struct mtx_padalign nblock;
+static struct rwlock_padalign nblock;
 /*
  * Lock that protects bdirtywait.
  */
@@ -296,7 +296,7 @@ static int runningbufreq;
  * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
  * getnewbuf(), and getblk().
  */
-static int needsbuffer;
+static volatile int needsbuffer;
 /*
  * Synchronization for bwillwrite() waiters.
  */
@@ -426,18 +426,27 @@ bdirtyadd(void)
 static __inline void
 bufspacewakeup(void)
 {
+	int need_wakeup, on;
 
 	/*
 	 * If someone is waiting for BUF space, wake them up.  Even
 	 * though we haven't freed the kva space yet, the waiting
 	 * process will be able to now.
 	 */
-	mtx_lock(&nblock);
-	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
-		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
-		wakeup(&needsbuffer);
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on,
+		    on & ~VFS_BIO_NEED_BUFSPACE))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup(__DEVOLATILE(void *, &needsbuffer));
+	rw_runlock(&nblock);
 }
 
 /*
@@ -497,7 +506,7 @@ runningbufwakeup(struct buf *bp)
 static __inline void
 bufcountadd(struct buf *bp)
 {
-	int old;
+	int mask, need_wakeup, old, on;
 
 	KASSERT((bp->b_flags & B_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
@@ -505,14 +514,22 @@ bufcountadd(struct buf *bp)
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
 	    ("numfreebuffers climbed to %d", old + 1));
-	mtx_lock(&nblock);
-	if (needsbuffer) {
-		needsbuffer &= ~VFS_BIO_NEED_ANY;
-		if (numfreebuffers >= hifreebuffers)
-			needsbuffer &= ~VFS_BIO_NEED_FREE;
-		wakeup(&needsbuffer);
+	mask = VFS_BIO_NEED_ANY;
+	if (numfreebuffers >= hifreebuffers)
+		mask |= VFS_BIO_NEED_FREE;
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if (on == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup(__DEVOLATILE(void *, &needsbuffer));
+	rw_runlock(&nblock);
 }
 
 /*
@@ -756,7 +773,7 @@ bufinit(void)
 	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
 	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
-	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
+	rw_init(&nblock, "needsbuffer lock");
 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
 
@@ -2054,9 +2071,7 @@ getnewbuf_bufd_help(struct vnode *vp, in
 		waitmsg = "newbuf";
 		flags = VFS_BIO_NEED_ANY;
 	}
-	mtx_lock(&nblock);
-	needsbuffer |= flags;
-	mtx_unlock(&nblock);
+	atomic_set_int(&needsbuffer, flags);
 	mtx_unlock(&bqclean);
 
 	bd_speedup();	/* heeeelp */
@@ -2066,12 +2081,11 @@ getnewbuf_bufd_help(struct vnode *vp, in
 	td = curthread;
 	cnt = 0;
 	wait = MNT_NOWAIT;
-	mtx_lock(&nblock);
-	while (needsbuffer & flags) {
+	rw_wlock(&nblock);
+	while ((needsbuffer & flags) != 0) {
 		if (vp != NULL && vp->v_type != VCHR &&
 		    (td->td_pflags & TDP_BUFNEED) == 0) {
-			mtx_unlock(&nblock);
-
+			rw_wunlock(&nblock);
 			/*
 			 * getblk() is called with a vnode locked, and
 			 * some majority of the dirty buffers may as
@@ -2093,15 +2107,16 @@ getnewbuf_bufd_help(struct vnode *vp, in
 				atomic_add_long(&notbufdflushes, 1);
 				curthread_pflags_restore(norunbuf);
 			}
-			mtx_lock(&nblock);
+			rw_wlock(&nblock);
 			if ((needsbuffer & flags) == 0)
 				break;
 		}
-		if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag,
-		    waitmsg, slptimeo))
+		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
+		    (PRIBIO + 4) | slpflag, waitmsg, slptimeo);
+		if (error != 0)
 			break;
 	}
-	mtx_unlock(&nblock);
+	rw_wunlock(&nblock);
 }
 
 static void
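
[Editor's note: for readers following the locking change, the diff replaces a
mutex-protected test-and-clear of needsbuffer with a compare-and-swap loop run
under the read side of the new nblock rwlock, while sleepers set bits
atomically and block under the write side, so concurrent wakeup paths no
longer serialize on a single mutex. The sketch below is a minimal userland
model of that CAS loop using C11 atomics; it is not kernel code, and
wakeup_bufspace_waiters() plus the flag values are hypothetical stand-ins for
illustration only.]

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical flag values; the kernel's real definitions live in vfs_bio.c. */
#define	VFS_BIO_NEED_BUFSPACE	0x01
#define	VFS_BIO_NEED_ANY	0x02

static _Atomic int needsbuffer;		/* models the kernel's volatile int */

/*
 * Model of the new bufspacewakeup() logic: clear VFS_BIO_NEED_BUFSPACE with
 * a CAS loop and report whether a wakeup would be issued.  The loop retries
 * only when the word changed between the load and the CAS.
 */
static int
wakeup_bufspace_waiters(void)
{
	int need_wakeup, on;

	for (;;) {
		need_wakeup = 0;
		on = atomic_load(&needsbuffer);
		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
			break;
		need_wakeup = 1;
		if (atomic_compare_exchange_weak(&needsbuffer, &on,
		    on & ~VFS_BIO_NEED_BUFSPACE))
			break;
	}
	return (need_wakeup);	/* the kernel calls wakeup() iff this is 1 */
}

int
main(void)
{
	/* A waiter announces itself, as getnewbuf_bufd_help() now does. */
	atomic_fetch_or(&needsbuffer, VFS_BIO_NEED_BUFSPACE);
	printf("first call, wakeup needed: %d\n", wakeup_bufspace_waiters());
	printf("second call, wakeup needed: %d\n", wakeup_bufspace_waiters());
	return (0);
}

[The weak compare-exchange may fail spuriously, which the retry loop absorbs,
mirroring how the kernel loop retries when another CPU changed needsbuffer
between the plain load and the atomic_cmpset_rel_int(). The rwlock is still
needed in the kernel because the flag check and the sleep must be atomic with
respect to wakeups: sleepers hold the write lock across both, while wakers
only take the read lock and so can proceed in parallel with each other.]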