Date:      Sun, 18 Aug 2019 11:43:59 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r351188 - in head/sys: kern sys
Message-ID:  <201908181143.x7IBhxZO011254@repo.freebsd.org>

Author: jeff
Date: Sun Aug 18 11:43:58 2019
New Revision: 351188
URL: https://svnweb.freebsd.org/changeset/base/351188

Log:
  Add a blocking wait bit to refcount.  This allows refs to be used as a simple
  barrier.
  
  Reviewed by:	markj, kib
  Discussed with:	jhb
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D21254
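
To make the barrier pattern concrete, below is a minimal sketch of a
hypothetical consumer.  The structure and helper names (struct drainable,
drainable_hold() and so on) are invented for illustration and are not part
of this commit; only refcount_init(), refcount_acquire(),
refcount_release() and the new refcount_wait() are the real interface:

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/priority.h>
	#include <sys/refcount.h>

	/* Hypothetical object whose concurrent users are counted. */
	struct drainable {
		volatile u_int	d_refs;
	};

	static void
	drainable_init(struct drainable *d)
	{

		/* The teardown path owns the initial reference. */
		refcount_init(&d->d_refs, 1);
	}

	/* Each user takes a reference... */
	static void
	drainable_hold(struct drainable *d)
	{

		refcount_acquire(&d->d_refs);
	}

	/* ...and drops it when finished. */
	static void
	drainable_drop(struct drainable *d)
	{

		(void)refcount_release(&d->d_refs);
	}

	/*
	 * Teardown: drop the initial reference, then sleep until every
	 * user has dropped theirs.  The refcount acts as the barrier.
	 */
	static void
	drainable_drain(struct drainable *d)
	{

		(void)refcount_release(&d->d_refs);
		refcount_wait(&d->d_refs, "drain", PWAIT);
	}

Because drainable_drain() holds the initial reference until it starts
waiting, no user thread ever observes the count reaching zero, and the
destructor logic stays in one place.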

Modified:
  head/sys/kern/kern_synch.c
  head/sys/sys/refcount.h

Modified: head/sys/kern/kern_synch.c
==============================================================================
--- head/sys/kern/kern_synch.c	Sun Aug 18 09:19:33 2019	(r351187)
+++ head/sys/kern/kern_synch.c	Sun Aug 18 11:43:58 2019	(r351188)
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
+#include <sys/refcount.h>
 #include <sys/sched.h>
 #include <sys/sdt.h>
 #include <sys/signalvar.h>
@@ -331,6 +332,75 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_
 	}
 	return (_sleep(&pause_wchan[curcpu], NULL,
 	    (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
+}
+
+/*
+ * Potentially release the last reference for refcount.  Check for
+ * unlikely conditions and signal the caller as to whether it was
+ * the final ref.
+ */
+bool
+refcount_release_last(volatile u_int *count, u_int n, u_int old)
+{
+	u_int waiter;
+
+	waiter = old & REFCOUNT_WAITER;
+	old = REFCOUNT_COUNT(old);
+	if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
+		/*
+		 * Avoid multiple destructor invocations if underflow occurred.
+		 * This is not perfect since the memory backing the containing
+		 * object may already have been reallocated.
+		 */
+		_refcount_update_saturated(count);
+		return (false);
+	}
+
+	/*
+	 * Attempt to atomically clear the waiter bit.  Wakeup waiters
+	 * if we are successful.
+	 */
+	if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
+		wakeup(__DEVOLATILE(u_int *, count));
+
+	/*
+	 * Last reference.  Signal the user to call the destructor.
+	 *
+	 * Ensure that the destructor sees all updates.  The fence_rel
+	 * at the start of refcount_releasen synchronizes with this fence.
+	 */
+	atomic_thread_fence_acq();
+	return (true);
+}
+
+/*
+ * Wait for a refcount wakeup.  This does not guarantee that the ref is still
+ * zero on return and may be subject to transient wakeups.  Callers wanting
+ * a precise answer should use refcount_wait().
+ */
+void
+refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+{
+	void *wchan;
+	u_int old;
+
+	if (REFCOUNT_COUNT(*count) == 0)
+		return;
+	wchan = __DEVOLATILE(void *, count);
+	sleepq_lock(wchan);
+	old = *count;
+	for (;;) {
+		if (REFCOUNT_COUNT(old) == 0) {
+			sleepq_release(wchan);
+			return;
+		}
+		if (old & REFCOUNT_WAITER)
+			break;
+		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
+			break;
+	}
+	sleepq_add(wchan, NULL, wmesg, 0, 0);
+	sleepq_wait(wchan, pri);
 }
 
 /*
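
The sleep/wakeup handshake above is worth spelling out: refcount_sleep()
publishes REFCOUNT_WAITER with a compare-and-set while holding the
sleepqueue lock, and the release path may only clear the bit (and call
wakeup()) when the counter is exactly REFCOUNT_WAITER, i.e. zero
references with the waiter bit set.  As a rough illustration only, the
following userspace model mimics the protocol with C11 atomics and a
condition variable standing in for the kernel sleepqueue; it is an
analogue of the committed code, not the code itself:

	#include <pthread.h>
	#include <stdatomic.h>

	#define	WAITER_BIT	(1U << 31)
	#define	COUNT(v)	((v) & ~WAITER_BIT)

	static pthread_mutex_t model_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t model_cv = PTHREAD_COND_INITIALIZER;

	/* Analogue of refcount_sleep(): one sleep, transient wakeups ok. */
	static void
	model_sleep(_Atomic unsigned int *c)
	{
		unsigned int old;

		pthread_mutex_lock(&model_lock);	/* sleepq_lock() role */
		old = atomic_load(c);
		for (;;) {
			if (COUNT(old) == 0) {
				pthread_mutex_unlock(&model_lock);
				return;
			}
			/* Publish the waiter bit for the release path. */
			if ((old & WAITER_BIT) != 0 ||
			    atomic_compare_exchange_weak(c, &old,
			    old | WAITER_BIT))
				break;
		}
		pthread_cond_wait(&model_cv, &model_lock); /* sleepq_wait() */
		pthread_mutex_unlock(&model_lock);
	}

	/*
	 * Analogue of the waiter handling in refcount_release_last():
	 * the CAS succeeds only when the value is exactly WAITER_BIT,
	 * i.e. the count is zero and a waiter is registered.
	 */
	static void
	model_wake(_Atomic unsigned int *c)
	{
		unsigned int expected;

		expected = WAITER_BIT;
		if (atomic_compare_exchange_strong(c, &expected, 0)) {
			pthread_mutex_lock(&model_lock);
			pthread_cond_broadcast(&model_cv); /* wakeup() role */
			pthread_mutex_unlock(&model_lock);
		}
	}

As in the kernel version, taking the lock before setting the waiter bit
helps close the lost-wakeup window: a waker whose CAS succeeds cannot
broadcast until the sleeper is actually parked on the condition variable.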

Modified: head/sys/sys/refcount.h
==============================================================================
--- head/sys/sys/refcount.h	Sun Aug 18 09:19:33 2019	(r351187)
+++ head/sys/sys/refcount.h	Sun Aug 18 11:43:58 2019	(r351188)
@@ -39,9 +39,15 @@
 #define	KASSERT(exp, msg)	/* */
 #endif
 
-#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
-#define	REFCOUNT_SATURATION_VALUE	(3U << 30)
+#define	REFCOUNT_WAITER			(1 << 31) /* Refcount has waiter. */
+#define	REFCOUNT_SATURATION_VALUE	(3U << 29)
 
+#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 30)) != 0)
+#define	REFCOUNT_COUNT(x)		((x) & ~REFCOUNT_WAITER)
+
+bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
+void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
+
 /*
  * Attempt to handle reference count overflow and underflow.  Force the counter
  * to stay at the saturation value so that a counter overflow cannot trigger
@@ -76,6 +82,19 @@ refcount_acquire(volatile u_int *count)
 		_refcount_update_saturated(count);
 }
 
+static __inline void
+refcount_acquiren(volatile u_int *count, u_int n)
+{
+
+	u_int old;
+
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_acquiren: n %d too large", n));
+	old = atomic_fetchadd_int(count, n);
+	if (__predict_false(REFCOUNT_SATURATED(old)))
+		_refcount_update_saturated(count);
+}
+
 static __inline __result_use_check bool
 refcount_acquire_checked(volatile u_int *count)
 {
@@ -91,34 +110,35 @@ refcount_acquire_checked(volatile u_int *count)
 }
 
 static __inline bool
-refcount_release(volatile u_int *count)
+refcount_releasen(volatile u_int *count, u_int n)
 {
 	u_int old;
 
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_releasen: n %d too large", n));
 	atomic_thread_fence_rel();
-	old = atomic_fetchadd_int(count, -1);
-	if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
-		/*
-		 * Avoid multiple destructor invocations if underflow occurred.
-		 * This is not perfect since the memory backing the containing
-		 * object may already have been reallocated.
-		 */
-		_refcount_update_saturated(count);
-		return (false);
-	}
-	if (old > 1)
-		return (false);
+	old = atomic_fetchadd_int(count, -n);
+	if (__predict_false(n >= REFCOUNT_COUNT(old) ||
+	    REFCOUNT_SATURATED(old)))
+		return (refcount_release_last(count, n, old));
+	return (false);
+}
 
-	/*
-	 * Last reference.  Signal the user to call the destructor.
-	 *
-	 * Ensure that the destructor sees all updates.  The fence_rel
-	 * at the start of the function synchronizes with this fence.
-	 */
-	atomic_thread_fence_acq();
-	return (true);
+static __inline bool
+refcount_release(volatile u_int *count)
+{
+
+	return (refcount_releasen(count, 1));
 }
 
+static __inline void
+refcount_wait(volatile u_int *count, const char *wmesg, int prio)
+{
+
+	while (*count != 0)
+		refcount_sleep(count, wmesg, prio);
+}
+
 /*
  * This function returns non-zero if the refcount was
  * incremented.  Otherwise zero is returned.
@@ -130,7 +150,7 @@ refcount_acquire_if_not_zero(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		if (old == 0)
+		if (REFCOUNT_COUNT(old) == 0)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
@@ -146,7 +166,7 @@ refcount_release_if_not_last(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		if (old == 1)
+		if (REFCOUNT_COUNT(old) == 1)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
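
Finally, the n-ary variants refcount_acquiren() and refcount_releasen()
let a caller that hands out or retires a batch of references do so with a
single atomic operation.  A hedged sketch, reusing the hypothetical
struct drainable from above:

	/* Dispatch n work items, each carrying one reference. */
	static void
	drainable_dispatch(struct drainable *d, u_int n)
	{

		refcount_acquiren(&d->d_refs, n);
		/* ... queue n work items that reference d ... */
	}

	/* Retire a completed batch; tear down on the last reference. */
	static void
	drainable_retire(struct drainable *d, u_int n)
	{

		if (refcount_releasen(&d->d_refs, n))
			drainable_destroy(d);	/* hypothetical destructor */
	}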


