Date:      Fri, 17 Nov 2017 20:41:18 +0000 (UTC)
From:      Mateusz Guzik <mjg@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r325958 - in head: share/man/man9 sys/conf sys/kern
Message-ID:  <201711172041.vAHKfImC006244@repo.freebsd.org>

Author: mjg
Date: Fri Nov 17 20:41:17 2017
New Revision: 325958
URL: https://svnweb.freebsd.org/changeset/base/325958

Log:
  lockmgr: remove the ADAPTIVE_LOCKMGRS option
  
  The code was never enabled and is very heavyweight.
  
  A revamped form of adaptive spinning may show up at a later time.
  
  Discussed with:	kib

Modified:
  head/share/man/man9/lock.9
  head/sys/conf/options
  head/sys/kern/kern_lock.c
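
For context, the ADAPTIVE_LOCKMGRS code removed below implemented adaptive
spinning: instead of going to sleep as soon as a lock acquisition fails, a
waiter keeps polling for as long as the current owner is running on another
CPU, on the theory that an on-CPU owner will release the lock soon.  The
essence, as a minimal sketch (lock_owner() and owner_is_running() are
hypothetical stand-ins for the LK_HOLDER() and TD_IS_RUNNING() macros used
in the real code below):

	/*
	 * While the exclusive owner stays on-CPU, poll instead of
	 * sleeping; cpu_spinwait() is a CPU hint such as PAUSE on x86.
	 */
	while (lock_owner(lk) == owner && owner_is_running(owner))
		cpu_spinwait();
	/* Owner went off-CPU or dropped the lock: retry, else sleep. */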

Modified: head/share/man/man9/lock.9
==============================================================================
--- head/share/man/man9/lock.9	Fri Nov 17 19:25:39 2017	(r325957)
+++ head/share/man/man9/lock.9	Fri Nov 17 20:41:17 2017	(r325958)
@@ -26,7 +26,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd November 2, 2014
+.Dd November 17, 2017
 .Dt LOCK 9
 .Os
 .Sh NAME
@@ -93,9 +93,6 @@ The timeout value passed to
 .It Fa flags
 The flags the lock is to be initialized with:
 .Bl -tag -width ".Dv LK_CANRECURSE"
-.It Dv LK_ADAPTIVE
-Enable adaptive spinning for this lock if the kernel is compiled with the
-ADAPTIVE_LOCKMGRS option.
 .It Dv LK_CANRECURSE
 Allow recursive exclusive locks.
 .It Dv LK_NOPROFILE
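
Before this change, a consumer could request the adaptive behavior per lock
at initialization time.  An illustrative call (the PVFS priority and the
wmesg string are arbitrary choices for the example):

	struct lock lk;

	/*
	 * Opt this lock into adaptive spinning; a no-op unless the
	 * kernel was built with options ADAPTIVE_LOCKMGRS.
	 */
	lockinit(&lk, PVFS, "examplelk", 0, LK_ADAPTIVE | LK_CANRECURSE);

Since no stock configuration ever defined ADAPTIVE_LOCKMGRS, the flag never
had an effect in practice.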

Modified: head/sys/conf/options
==============================================================================
--- head/sys/conf/options	Fri Nov 17 19:25:39 2017	(r325957)
+++ head/sys/conf/options	Fri Nov 17 20:41:17 2017	(r325958)
@@ -69,7 +69,6 @@ TEXTDUMP_VERBOSE	opt_ddb.h
 NUM_CORE_FILES	opt_global.h
 
 # Miscellaneous options.
-ADAPTIVE_LOCKMGRS
 ALQ
 ALTERA_SDCARD_FAST_SIM	opt_altera_sdcard.h
 ATSE_CFI_HACK	opt_cfi.h
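
An entry in sys/conf/options exposes a build-time knob to kernel
configuration files; an option listed without an explicit header file, as
here, gets its own opt_<name>.h (hence the opt_adaptive_lockmgrs.h include
removed from kern_lock.c below).  Enabling it would have looked like this
in a kernel config file:

	options 	ADAPTIVE_LOCKMGRS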

Modified: head/sys/kern/kern_lock.c
==============================================================================
--- head/sys/kern/kern_lock.c	Fri Nov 17 19:25:39 2017	(r325957)
+++ head/sys/kern/kern_lock.c	Fri Nov 17 20:41:17 2017	(r325958)
@@ -26,7 +26,6 @@
  * DAMAGE.
  */
 
-#include "opt_adaptive_lockmgrs.h"
 #include "opt_ddb.h"
 #include "opt_hwpmc_hooks.h"
 
@@ -159,15 +158,6 @@ struct lock_class lock_class_lockmgr = {
 #endif
 };
 
-#ifdef ADAPTIVE_LOCKMGRS
-static u_int alk_retries = 10;
-static u_int alk_loops = 10000;
-static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
-    "lockmgr debugging");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
-SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
-#endif
-
 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
     int flags);
 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);
@@ -661,10 +651,6 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
 	uint64_t waittime = 0;
 	int contested = 0;
 #endif
-#ifdef ADAPTIVE_LOCKMGRS
-	volatile struct thread *owner;
-	u_int i, spintries = 0;
-#endif
 
 	error = 0;
 	tid = (uintptr_t)curthread;
@@ -748,76 +734,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
 				break;
 			}
 
-#ifdef ADAPTIVE_LOCKMGRS
 			/*
-			 * If the owner is running on another CPU, spin until
-			 * the owner stops running or the state of the lock
-			 * changes.  We need a double-state handle here
-			 * because for a failed acquisition the lock can be
-			 * either held in exclusive mode or shared mode
-			 * (for the writer starvation avoidance technique).
-			 */
-			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-			    LK_HOLDER(x) != LK_KERNPROC) {
-				owner = (struct thread *)LK_HOLDER(x);
-				if (LOCK_LOG_TEST(&lk->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, lk, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(td), "spinning",
-				    "lockname:\"%s\"", lk->lock_object.lo_name);
-
-				/*
-				 * If we are holding also an interlock drop it
-				 * in order to avoid a deadlock if the lockmgr
-				 * owner is adaptively spinning on the
-				 * interlock itself.
-				 */
-				if (flags & LK_INTERLOCK) {
-					class->lc_unlock(ilk);
-					flags &= ~LK_INTERLOCK;
-				}
-				GIANT_SAVE();
-				while (LK_HOLDER(lk->lk_lock) ==
-				    (uintptr_t)owner && TD_IS_RUNNING(owner))
-					cpu_spinwait();
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(td), "running");
-				GIANT_RESTORE();
-				continue;
-			} else if (LK_CAN_ADAPT(lk, flags) &&
-			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-			    spintries < alk_retries) {
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(td), "spinning",
-				    "lockname:\"%s\"", lk->lock_object.lo_name);
-				if (flags & LK_INTERLOCK) {
-					class->lc_unlock(ilk);
-					flags &= ~LK_INTERLOCK;
-				}
-				GIANT_SAVE();
-				spintries++;
-				for (i = 0; i < alk_loops; i++) {
-					if (LOCK_LOG_TEST(&lk->lock_object, 0))
-						CTR4(KTR_LOCK,
-				    "%s: shared spinning on %p with %u and %u",
-						    __func__, lk, spintries, i);
-					x = lk->lk_lock;
-					if ((x & LK_SHARE) == 0 ||
-					    LK_CAN_SHARE(x, flags) != 0)
-						break;
-					cpu_spinwait();
-				}
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(td), "running");
-				GIANT_RESTORE();
-				if (i != alk_loops)
-					continue;
-			}
-#endif
-
-			/*
 			 * Acquire the sleepqueue chain lock because we
 			 * probably will need to manipulate waiters flags.
 			 */
@@ -833,25 +750,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
 				continue;
 			}
 
-#ifdef ADAPTIVE_LOCKMGRS
 			/*
-			 * The current lock owner might have started executing
-			 * on another CPU (or the lock could have changed
-			 * owner) while we were waiting on the turnstile
-			 * chain lock.  If so, drop the turnstile lock and try
-			 * again.
-			 */
-			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-			    LK_HOLDER(x) != LK_KERNPROC) {
-				owner = (struct thread *)LK_HOLDER(x);
-				if (TD_IS_RUNNING(owner)) {
-					sleepq_release(&lk->lock_object);
-					continue;
-				}
-			}
-#endif
-
-			/*
 			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
 			 * loop back and retry.
 			 */
@@ -992,77 +891,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
 				break;
 			}
 
-#ifdef ADAPTIVE_LOCKMGRS
 			/*
-			 * If the owner is running on another CPU, spin until
-			 * the owner stops running or the state of the lock
-			 * changes.
-			 */
-			x = lk->lk_lock;
-			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-			    LK_HOLDER(x) != LK_KERNPROC) {
-				owner = (struct thread *)LK_HOLDER(x);
-				if (LOCK_LOG_TEST(&lk->lock_object, 0))
-					CTR3(KTR_LOCK,
-					    "%s: spinning on %p held by %p",
-					    __func__, lk, owner);
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(td), "spinning",
-				    "lockname:\"%s\"", lk->lock_object.lo_name);
-
-				/*
-				 * If we are holding also an interlock drop it
-				 * in order to avoid a deadlock if the lockmgr
-				 * owner is adaptively spinning on the
-				 * interlock itself.
-				 */
-				if (flags & LK_INTERLOCK) {
-					class->lc_unlock(ilk);
-					flags &= ~LK_INTERLOCK;
-				}
-				GIANT_SAVE();
-				while (LK_HOLDER(lk->lk_lock) ==
-				    (uintptr_t)owner && TD_IS_RUNNING(owner))
-					cpu_spinwait();
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(td), "running");
-				GIANT_RESTORE();
-				continue;
-			} else if (LK_CAN_ADAPT(lk, flags) &&
-			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
-			    spintries < alk_retries) {
-				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
-				    !atomic_cmpset_ptr(&lk->lk_lock, x,
-				    x | LK_EXCLUSIVE_SPINNERS))
-					continue;
-				KTR_STATE1(KTR_SCHED, "thread",
-				    sched_tdname(td), "spinning",
-				    "lockname:\"%s\"", lk->lock_object.lo_name);
-				if (flags & LK_INTERLOCK) {
-					class->lc_unlock(ilk);
-					flags &= ~LK_INTERLOCK;
-				}
-				GIANT_SAVE();
-				spintries++;
-				for (i = 0; i < alk_loops; i++) {
-					if (LOCK_LOG_TEST(&lk->lock_object, 0))
-						CTR4(KTR_LOCK,
-				    "%s: shared spinning on %p with %u and %u",
-						    __func__, lk, spintries, i);
-					if ((lk->lk_lock &
-					    LK_EXCLUSIVE_SPINNERS) == 0)
-						break;
-					cpu_spinwait();
-				}
-				KTR_STATE0(KTR_SCHED, "thread",
-				    sched_tdname(td), "running");
-				GIANT_RESTORE();
-				if (i != alk_loops)
-					continue;
-			}
-#endif
-
-			/*
 			 * Acquire the sleepqueue chain lock because we
 			 * probably will need to manipulate waiters flags.
 			 */
@@ -1077,24 +906,6 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lo
 				sleepq_release(&lk->lock_object);
 				continue;
 			}
-
-#ifdef ADAPTIVE_LOCKMGRS
-			/*
-			 * The current lock owner might have started executing
-			 * on another CPU (or the lock could have changed
-			 * owner) while we were waiting on the turnstile
-			 * chain lock.  If so, drop the turnstile lock and try
-			 * again.
-			 */
-			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-			    LK_HOLDER(x) != LK_KERNPROC) {
-				owner = (struct thread *)LK_HOLDER(x);
-				if (TD_IS_RUNNING(owner)) {
-					sleepq_release(&lk->lock_object);
-					continue;
-				}
-			}
-#endif
 
 			/*
 			 * The lock can be in the state where there is a
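
Shared-mode waiters had no single owner thread to watch, so their spin was
bounded by the debug.lockmgr.retries and debug.lockmgr.loops tunables
(alk_retries and alk_loops above) rather than being open-ended.  A
condensed sketch of the deleted branch (not buildable on its own):

	spintries++;
	for (i = 0; i < alk_loops; i++) {
		x = lk->lk_lock;
		if ((x & LK_SHARE) == 0 || LK_CAN_SHARE(x, flags) != 0)
			break;		/* lock state changed; retry */
		cpu_spinwait();
	}
	if (i != alk_loops)
		continue;		/* loop back to the fast path */
	/* Spin budget exhausted: fall through to the sleepqueue. */

Note also that both branches dropped the interlock before spinning: if the
lockmgr owner were itself adaptively spinning on that same interlock,
spinning while holding it could deadlock.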


