From: Jeff Roberson <jeff@FreeBSD.org>
Date: Sun, 18 Aug 2019 11:43:59 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r351188 - in head/sys: kern sys

Author: jeff
Date: Sun Aug 18 11:43:58 2019
New Revision: 351188
URL: https://svnweb.freebsd.org/changeset/base/351188

Log:
  Add a blocking wait bit to refcount.  This allows refs to be used as a
  simple barrier.

  Reviewed by:	markj, kib
  Discussed with:	jhb
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D21254

Modified:
  head/sys/kern/kern_synch.c
  head/sys/sys/refcount.h

Modified: head/sys/kern/kern_synch.c
==============================================================================
--- head/sys/kern/kern_synch.c	Sun Aug 18 09:19:33 2019	(r351187)
+++ head/sys/kern/kern_synch.c	Sun Aug 18 11:43:58 2019	(r351188)
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/refcount.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/sdt.h>
@@ -331,6 +332,75 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_
 	}
 	return (_sleep(&pause_wchan[curcpu], NULL,
 	    (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
+}
+
+/*
+ * Potentially release the last reference for refcount.  Check for
+ * unlikely conditions and signal the caller as to whether it was
+ * the final ref.
+ */
+bool
+refcount_release_last(volatile u_int *count, u_int n, u_int old)
+{
+	u_int waiter;
+
+	waiter = old & REFCOUNT_WAITER;
+	old = REFCOUNT_COUNT(old);
+	if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
+		/*
+		 * Avoid multiple destructor invocations if underflow occurred.
+		 * This is not perfect since the memory backing the containing
+		 * object may already have been reallocated.
+		 */
+		_refcount_update_saturated(count);
+		return (false);
+	}
+
+	/*
+	 * Attempt to atomically clear the waiter bit.  Wakeup waiters
+	 * if we are successful.
+	 */
+	if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
+		wakeup(__DEVOLATILE(u_int *, count));
+
+	/*
+	 * Last reference.  Signal the user to call the destructor.
+	 *
+	 * Ensure that the destructor sees all updates. The fence_rel
+	 * at the start of refcount_releasen synchronizes with this fence.
+	 */
+	atomic_thread_fence_acq();
+	return (true);
+}
+
+/*
+ * Wait for a refcount wakeup.  This does not guarantee that the ref is still
+ * zero on return and may be subject to transient wakeups.  Callers wanting
+ * a precise answer should use refcount_wait().
+ */
+void
+refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+{
+	void *wchan;
+	u_int old;
+
+	if (REFCOUNT_COUNT(*count) == 0)
+		return;
+	wchan = __DEVOLATILE(void *, count);
+	sleepq_lock(wchan);
+	old = *count;
+	for (;;) {
+		if (REFCOUNT_COUNT(old) == 0) {
+			sleepq_release(wchan);
+			return;
+		}
+		if (old & REFCOUNT_WAITER)
+			break;
+		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
+			break;
+	}
+	sleepq_add(wchan, NULL, wmesg, 0, 0);
+	sleepq_wait(wchan, pri);
 }
 
 /*
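
The two new kern_synch.c functions above are the whole handshake behind the
wait bit: refcount_sleep() publishes REFCOUNT_WAITER in the counter and blocks
on the counter's address, and refcount_release_last() clears the bit and
wakeup()s that same address once the count has dropped to zero.  For the
"simple barrier" use mentioned in the log, the release side therefore needs
nothing beyond the existing API.  A minimal sketch of that side, assuming a
hypothetical object with an embedded counter; struct foo, foo_hold() and
foo_rele() are invented for illustration and are not part of this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/refcount.h>

struct foo {
	volatile u_int	foo_refs;	/* outstanding users of this object */
	/* ... payload ... */
};

/* A transient user brackets its access with hold/rele. */
static void
foo_hold(struct foo *fp)
{

	refcount_acquire(&fp->foo_refs);
}

static void
foo_rele(struct foo *fp)
{

	/*
	 * If this drops the last reference while the waiter bit is set,
	 * refcount_release_last() clears REFCOUNT_WAITER and wakes the
	 * draining thread; the transient user itself frees nothing.
	 */
	(void)refcount_release(&fp->foo_refs);
}

The draining side of the sketch follows the refcount.h diff below.
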
Modified: head/sys/sys/refcount.h
==============================================================================
--- head/sys/sys/refcount.h	Sun Aug 18 09:19:33 2019	(r351187)
+++ head/sys/sys/refcount.h	Sun Aug 18 11:43:58 2019	(r351188)
@@ -39,9 +39,15 @@
 #define	KASSERT(exp, msg)	/* */
 #endif
 
-#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
-#define	REFCOUNT_SATURATION_VALUE	(3U << 30)
+#define	REFCOUNT_WAITER			(1 << 31) /* Refcount has waiter. */
+#define	REFCOUNT_SATURATION_VALUE	(3U << 29)
+#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 30)) != 0)
+#define	REFCOUNT_COUNT(x)		((x) & ~REFCOUNT_WAITER)
+
+bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
+void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
+
 
 /*
  * Attempt to handle reference count overflow and underflow.  Force the counter
  * to stay at the saturation value so that a counter overflow cannot trigger
@@ -76,6 +82,19 @@ refcount_acquire(volatile u_int *count)
 		_refcount_update_saturated(count);
 }
 
+static __inline void
+refcount_acquiren(volatile u_int *count, u_int n)
+{
+
+	u_int old;
+
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_acquiren: n %d too large", n));
+	old = atomic_fetchadd_int(count, n);
+	if (__predict_false(REFCOUNT_SATURATED(old)))
+		_refcount_update_saturated(count);
+}
+
 static __inline __result_use_check bool
 refcount_acquire_checked(volatile u_int *count)
 {
@@ -91,34 +110,35 @@ refcount_acquire_checked(volatile u_int *count)
 }
 
 static __inline bool
-refcount_release(volatile u_int *count)
+refcount_releasen(volatile u_int *count, u_int n)
 {
 	u_int old;
 
+	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+	    ("refcount_releasen: n %d too large", n));
 	atomic_thread_fence_rel();
-	old = atomic_fetchadd_int(count, -1);
-	if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
-		/*
-		 * Avoid multiple destructor invocations if underflow occurred.
-		 * This is not perfect since the memory backing the containing
-		 * object may already have been reallocated.
-		 */
-		_refcount_update_saturated(count);
-		return (false);
-	}
-	if (old > 1)
-		return (false);
+	old = atomic_fetchadd_int(count, -n);
+	if (__predict_false(n >= REFCOUNT_COUNT(old) ||
+	    REFCOUNT_SATURATED(old)))
+		return (refcount_release_last(count, n, old));
+	return (false);
+}
 
-	/*
-	 * Last reference.  Signal the user to call the destructor.
-	 *
-	 * Ensure that the destructor sees all updates. The fence_rel
-	 * at the start of the function synchronizes with this fence.
-	 */
-	atomic_thread_fence_acq();
-	return (true);
+static __inline bool
+refcount_release(volatile u_int *count)
+{
+
+	return (refcount_releasen(count, 1));
 }
 
+static __inline void
+refcount_wait(volatile u_int *count, const char *wmesg, int prio)
+{
+
+	while (*count != 0)
+		refcount_sleep(count, wmesg, prio);
+}
+
 /*
  * This functions returns non-zero if the refcount was
  * incremented. Else zero is returned.
@@ -130,7 +150,7 @@ refcount_acquire_if_not_zero(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		if (old == 0)
+		if (REFCOUNT_COUNT(old) == 0)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
@@ -146,7 +166,7 @@ refcount_release_if_not_last(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		if (old == 1)
+		if (REFCOUNT_COUNT(old) == 1)
 			return (false);
 		if (__predict_false(REFCOUNT_SATURATED(old)))
 			return (true);
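
Completing the sketch from after the kern_synch.c diff: the draining thread
drops its own reference and then uses the new refcount_wait() as the barrier,
blocking until every foo_hold() has been matched by a foo_rele().
foo_destroy() and the "foodrn" wmesg are again invented for illustration;
PWAIT is the usual sleep priority from <sys/priority.h>:

/*
 * Drain side of the barrier sketched earlier.  Callers must guarantee
 * that no new foo_hold() can start once teardown begins.
 */
static void
foo_destroy(struct foo *fp)
{

	/* Drop our own reference; it may or may not be the last one. */
	(void)refcount_release(&fp->foo_refs);

	/*
	 * Block until the count reaches zero.  refcount_wait() loops over
	 * refcount_sleep() because the latter may return on a transient
	 * wakeup with the count still non-zero.
	 */
	refcount_wait(&fp->foo_refs, "foodrn", PWAIT);

	/* No other thread can see fp now; it is safe to free or reuse it. */
}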