Date:      Sun, 22 Jun 2014 08:30:44 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject:   svn commit: r267713 - stable/10/sys/kern
Message-ID:  <201406220830.s5M8Ui6j009555@svn.freebsd.org>

Author: kib
Date: Sun Jun 22 08:30:43 2014
New Revision: 267713
URL: http://svnweb.freebsd.org/changeset/base/267713

Log:
  MFC r267255:
  Change the nblock mutex to rwlock.
  
  MFC r267264:
  Devolatile as needed.
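
  The change replaces exclusive serialization of needsbuffer updates with
  lock-free bit clearing: wakers take nblock shared and clear flag bits
  with a compare-and-swap loop, so concurrent wakeups no longer contend
  on a mutex.  A minimal userland sketch of that loop, with C11 atomics
  standing in for the kernel's atomic_cmpset_rel_int() and an
  illustrative flag value (not the committed source):

	/*
	 * Sketch of the flag-clearing loop bufspacewakeup() uses after
	 * this change: load the word, give up if the bit is already
	 * clear, otherwise try to clear it with a CAS and retry on
	 * contention.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	#define VFS_BIO_NEED_BUFSPACE 0x10	/* illustrative value */

	static atomic_int needsbuffer;

	/* Return 1 if we cleared the bit and a wakeup() should follow. */
	static int
	clear_need_bufspace(void)
	{
		int on;

		for (;;) {
			on = atomic_load(&needsbuffer);
			if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
				return (0);	/* nobody waits for space */
			if (atomic_compare_exchange_weak(&needsbuffer, &on,
			    on & ~VFS_BIO_NEED_BUFSPACE))
				return (1);	/* cleared; caller wakes */
			/* Another thread changed the word; retry. */
		}
	}

	int
	main(void)
	{

		atomic_store(&needsbuffer, VFS_BIO_NEED_BUFSPACE);
		printf("first call wakes: %d\n", clear_need_bufspace());
		printf("second call wakes: %d\n", clear_need_bufspace());
		return (0);
	}

  The committed code's atomic_cmpset_rel_int() additionally carries
  release semantics, ordering the waker's earlier stores before the flag
  clear; the C11 default of seq_cst used in the sketch is stronger.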

Modified:
  stable/10/sys/kern/vfs_bio.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/kern/vfs_bio.c
==============================================================================
--- stable/10/sys/kern/vfs_bio.c	Sun Jun 22 06:54:36 2014	(r267712)
+++ stable/10/sys/kern/vfs_bio.c	Sun Jun 22 08:30:43 2014	(r267713)
@@ -251,7 +251,7 @@ static struct mtx_padalign rbreqlock;
 /*
  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
  */
-static struct mtx_padalign nblock;
+static struct rwlock_padalign nblock;
 
 /*
  * Lock that protects bdirtywait.
@@ -296,7 +296,7 @@ static int runningbufreq;
  * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
  * getnewbuf(), and getblk().
  */
-static int needsbuffer;
+static volatile int needsbuffer;
 
 /*
  * Synchronization for bwillwrite() waiters.
@@ -426,18 +426,27 @@ bdirtyadd(void)
 static __inline void
 bufspacewakeup(void)
 {
+	int need_wakeup, on;
 
 	/*
 	 * If someone is waiting for BUF space, wake them up.  Even
 	 * though we haven't freed the kva space yet, the waiting
 	 * process will be able to now.
 	 */
-	mtx_lock(&nblock);
-	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
-		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
-		wakeup(&needsbuffer);
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on,
+		    on & ~VFS_BIO_NEED_BUFSPACE))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup(__DEVOLATILE(void *, &needsbuffer));
+	rw_runlock(&nblock);
 }
 
 /*
@@ -497,7 +506,7 @@ runningbufwakeup(struct buf *bp)
 static __inline void
 bufcountadd(struct buf *bp)
 {
-	int old;
+	int mask, need_wakeup, old, on;
 
 	KASSERT((bp->b_flags & B_INFREECNT) == 0,
 	    ("buf %p already counted as free", bp));
@@ -505,14 +514,22 @@ bufcountadd(struct buf *bp)
 	old = atomic_fetchadd_int(&numfreebuffers, 1);
 	KASSERT(old >= 0 && old < nbuf,
 	    ("numfreebuffers climbed to %d", old + 1));
-	mtx_lock(&nblock);
-	if (needsbuffer) {
-		needsbuffer &= ~VFS_BIO_NEED_ANY;
-		if (numfreebuffers >= hifreebuffers)
-			needsbuffer &= ~VFS_BIO_NEED_FREE;
-		wakeup(&needsbuffer);
+	mask = VFS_BIO_NEED_ANY;
+	if (numfreebuffers >= hifreebuffers)
+		mask |= VFS_BIO_NEED_FREE;
+	rw_rlock(&nblock);
+	for (;;) {
+		need_wakeup = 0;
+		on = needsbuffer;
+		if (on == 0)
+			break;
+		need_wakeup = 1;
+		if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
+			break;
 	}
-	mtx_unlock(&nblock);
+	if (need_wakeup)
+		wakeup(__DEVOLATILE(void *, &needsbuffer));
+	rw_runlock(&nblock);
 }
 
 /*
@@ -756,7 +773,7 @@ bufinit(void)
 	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
 	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
-	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
+	rw_init(&nblock, "needsbuffer lock");
 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
 
@@ -2054,9 +2071,7 @@ getnewbuf_bufd_help(struct vnode *vp, in
 		waitmsg = "newbuf";
 		flags = VFS_BIO_NEED_ANY;
 	}
-	mtx_lock(&nblock);
-	needsbuffer |= flags;
-	mtx_unlock(&nblock);
+	atomic_set_int(&needsbuffer, flags);
 	mtx_unlock(&bqclean);
 
 	bd_speedup();	/* heeeelp */
@@ -2066,12 +2081,11 @@ getnewbuf_bufd_help(struct vnode *vp, in
 	td = curthread;
 	cnt = 0;
 	wait = MNT_NOWAIT;
-	mtx_lock(&nblock);
-	while (needsbuffer & flags) {
+	rw_wlock(&nblock);
+	while ((needsbuffer & flags) != 0) {
 		if (vp != NULL && vp->v_type != VCHR &&
 		    (td->td_pflags & TDP_BUFNEED) == 0) {
-			mtx_unlock(&nblock);
-
+			rw_wunlock(&nblock);
 			/*
 			 * getblk() is called with a vnode locked, and
 			 * some majority of the dirty buffers may as
@@ -2093,15 +2107,16 @@ getnewbuf_bufd_help(struct vnode *vp, in
 				atomic_add_long(&notbufdflushes, 1);
 				curthread_pflags_restore(norunbuf);
 			}
-			mtx_lock(&nblock);
+			rw_wlock(&nblock);
 			if ((needsbuffer & flags) == 0)
 				break;
 		}
-		if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag,
-		    waitmsg, slptimeo))
+		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
+		    (PRIBIO + 4) | slpflag, waitmsg, slptimeo);
+		if (error != 0)
 			break;
 	}
-	mtx_unlock(&nblock);
+	rw_wunlock(&nblock);
 }
 
 static void
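
  The locking asymmetry in the diff above is the point of the change:
  wakers hold nblock shared across the flag clear and wakeup(), while a
  sleeper holds it exclusive from its flag test until rw_sleep() has it
  asleep, so a wakeup cannot fall into the window between test and sleep
  even though independent wakers now proceed concurrently.  A two-thread
  userland model of that protocol, where pthread_rwlock_t stands in for
  rwlock(9) and a mutex/condvar pair emulates rw_sleep()/wakeup(9),
  which the kernel provides directly (build with -lpthread; everything
  here is illustrative, not the committed code):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	#define VFS_BIO_NEED_ANY 0x01	/* illustrative value */

	static pthread_rwlock_t nblock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t chanmtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t chan = PTHREAD_COND_INITIALIZER;
	static atomic_int needsbuffer;

	static void *
	sleeper(void *arg)
	{

		(void)arg;
		/* Announce need, like atomic_set_int() in the diff. */
		atomic_fetch_or(&needsbuffer, VFS_BIO_NEED_ANY);
		pthread_rwlock_wrlock(&nblock);
		while ((atomic_load(&needsbuffer) & VFS_BIO_NEED_ANY) != 0) {
			/*
			 * Emulate rw_sleep(): latch the wakeup channel
			 * before dropping the rwlock, so a waker cannot
			 * slip its broadcast into the window between
			 * the flag test and the wait.
			 */
			pthread_mutex_lock(&chanmtx);
			pthread_rwlock_unlock(&nblock);
			pthread_cond_wait(&chan, &chanmtx);
			pthread_mutex_unlock(&chanmtx);
			pthread_rwlock_wrlock(&nblock);
		}
		pthread_rwlock_unlock(&nblock);
		printf("sleeper: need satisfied\n");
		return (NULL);
	}

	static void *
	waker(void *arg)
	{
		int need_wakeup, on;

		(void)arg;
		sleep(1);		/* let the sleeper block first */
		/* Shared lock: independent wakers do not serialize. */
		pthread_rwlock_rdlock(&nblock);
		for (;;) {
			need_wakeup = 0;
			on = atomic_load(&needsbuffer);
			if ((on & VFS_BIO_NEED_ANY) == 0)
				break;
			need_wakeup = 1;
			if (atomic_compare_exchange_weak(&needsbuffer, &on,
			    on & ~VFS_BIO_NEED_ANY))
				break;
		}
		if (need_wakeup) {	/* emulate wakeup(&needsbuffer) */
			pthread_mutex_lock(&chanmtx);
			pthread_cond_broadcast(&chan);
			pthread_mutex_unlock(&chanmtx);
		}
		pthread_rwlock_unlock(&nblock);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t s, w;

		pthread_create(&s, NULL, sleeper, NULL);
		pthread_create(&w, NULL, waker, NULL);
		pthread_join(s, NULL);
		pthread_join(w, NULL);
		return (0);
	}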


