Date:      Fri, 29 May 2009 01:49:27 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r193011 - in head: . share/man/man9 sys/cddl/compat/opensolaris/sys sys/conf sys/kern sys/sys
Message-ID:  <200905290149.n4T1nRJc056207@svn.freebsd.org>

Author: attilio
Date: Fri May 29 01:49:27 2009
New Revision: 193011
URL: http://svn.freebsd.org/changeset/base/193011

Log:
  Reverse the logic of the ADAPTIVE_SX option and enable it by default.
  Introduce the complementary NO_ADAPTIVE_SX option to disable it.
  The SX_ADAPTIVESPIN flag passed to sx_init_flags(9) is removed and a
  new flag with the reversed meaning, SX_NOADAPTIVE, is added.
  
  Additionally, implement adaptive spinning for sx locks held in shared
  mode.  The spinning limits can be tuned through sysctls until the code
  reaches release, after which the sysctls should probably be dropped.
  
  This change was prompted by recent benchmarks showing that it improves
  the concurrency of workloads in the presence of high contention
  (e.g. ZFS).
  
  The KPI breakage is documented by a __FreeBSD_version bump and by
  manpage and UPDATING updates.
  
  Requested by:	jeff, kmacy
  Reviewed by:	jeff
  Tested by:	pho
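
For sx(9) consumers, the practical effect of the KPI change is that code
which previously passed SX_ADAPTIVESPIN to sx_init_flags(9) now simply
omits the flag (adaptive spinning is the default), while a lock that wants
to opt out passes the new SX_NOADAPTIVE flag instead.  A minimal sketch,
using hypothetical foo_lock/bar_lock names (not code from this commit):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/sx.h>

	static struct sx foo_lock;	/* hypothetical example locks */
	static struct sx bar_lock;

	static void
	foo_locks_init(void)
	{
		/* Previously: sx_init_flags(&foo_lock, "foo", SX_ADAPTIVESPIN); */
		/* Adaptive spinning is now implied, so a plain init is enough. */
		sx_init(&foo_lock, "foo");

		/* Explicitly opt a lock out of adaptive spinning instead. */
		sx_init_flags(&bar_lock, "bar", SX_NOADAPTIVE);
	}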

Modified:
  head/UPDATING
  head/share/man/man9/sx.9
  head/sys/cddl/compat/opensolaris/sys/mutex.h
  head/sys/cddl/compat/opensolaris/sys/rwlock.h
  head/sys/conf/NOTES
  head/sys/conf/options
  head/sys/kern/kern_sx.c
  head/sys/sys/param.h
  head/sys/sys/sx.h

Modified: head/UPDATING
==============================================================================
--- head/UPDATING	Fri May 29 01:31:18 2009	(r193010)
+++ head/UPDATING	Fri May 29 01:49:27 2009	(r193011)
@@ -22,6 +22,14 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 8.
 	to maximize performance.  (To disable malloc debugging, run
 	ln -s aj /etc/malloc.conf.)
 
+20090528:
+	The kernel option ADAPTIVE_SX has been retired and replaced by the
+	new option NO_ADAPTIVE_SX, which carries the reversed logic.
+	The flags accepted by sx_init_flags() change accordingly: the
+	SX_ADAPTIVESPIN flag has been retired and replaced by the new
+	SX_NOADAPTIVE flag, which carries the reversed logic.
+	Bump __FreeBSD_version to 800092.
+
 20090527:
 	Add support for hierarchical jails.  Remove global securelevel.
 	Bump __FreeBSD_version to 800091.

Modified: head/share/man/man9/sx.9
==============================================================================
--- head/share/man/man9/sx.9	Fri May 29 01:31:18 2009	(r193010)
+++ head/share/man/man9/sx.9	Fri May 29 01:49:27 2009	(r193011)
@@ -26,7 +26,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd November 25, 2007
+.Dd May 28, 2009
 .Dt SX 9
 .Os
 .Sh NAME
@@ -122,10 +122,10 @@ argument to
 specifies a set of optional flags to alter the behavior of
 .Fa sx .
 It contains one or more of the following flags:
-.Bl -tag -width SX_ADAPTIVESPIN
-.It Dv SX_ADAPTIVESPIN
-If the kernel is compiled with
-.Cd "options ADAPTIVE_SX" ,
+.Bl -tag -width SX_NOADAPTIVE
+.It Dv SX_NOADAPTIVE
+If the kernel is not compiled with
+.Cd "options NO_ADAPTIVE_SX" ,
 then lock operations for
 .Fa sx
 will spin instead of sleeping while an exclusive lock holder is executing on

Modified: head/sys/cddl/compat/opensolaris/sys/mutex.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/mutex.h	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/cddl/compat/opensolaris/sys/mutex.h	Fri May 29 01:49:27 2009	(r193011)
@@ -47,9 +47,9 @@ typedef enum {
 typedef struct sx	kmutex_t;
 
 #ifndef DEBUG
-#define	MUTEX_FLAGS	(SX_DUPOK | SX_NOWITNESS | SX_ADAPTIVESPIN)
+#define	MUTEX_FLAGS	(SX_DUPOK | SX_NOWITNESS)
 #else
-#define	MUTEX_FLAGS	(SX_DUPOK | SX_ADAPTIVESPIN)
+#define	MUTEX_FLAGS	(SX_DUPOK)
 #endif
 
 #define	mutex_init(lock, desc, type, arg)	do {			\

Modified: head/sys/cddl/compat/opensolaris/sys/rwlock.h
==============================================================================
--- head/sys/cddl/compat/opensolaris/sys/rwlock.h	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/cddl/compat/opensolaris/sys/rwlock.h	Fri May 29 01:49:27 2009	(r193011)
@@ -49,9 +49,9 @@ typedef enum {
 typedef	struct sx	krwlock_t;
 
 #ifndef DEBUG
-#define	RW_FLAGS	(SX_DUPOK | SX_NOWITNESS | SX_ADAPTIVESPIN)
+#define	RW_FLAGS	(SX_DUPOK | SX_NOWITNESS)
 #else
-#define	RW_FLAGS	(SX_DUPOK | SX_ADAPTIVESPIN)
+#define	RW_FLAGS	(SX_DUPOK)
 #endif
 
 #define	RW_READ_HELD(x)		(rw_read_held((x)))

Modified: head/sys/conf/NOTES
==============================================================================
--- head/sys/conf/NOTES	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/conf/NOTES	Fri May 29 01:49:27 2009	(r193011)
@@ -215,11 +215,11 @@ options 	NO_ADAPTIVE_MUTEXES
 # to disable it.
 options 	NO_ADAPTIVE_RWLOCKS
 
-# ADAPTIVE_SX changes the behavior of sx locks to spin if the thread
-# that currently owns the lock is executing on another CPU.  Note that
-# in addition to enabling this option, individual sx locks must be
-# initialized with the SX_ADAPTIVESPIN flag.
-options 	ADAPTIVE_SX
+# ADAPTIVE_SX changes the behavior of sx locks to spin if the thread that
+# currently owns the sx lock is executing on another CPU.
+# This behaviour is enabled by default, so this option can be used to
+# disable it.
+options 	NO_ADAPTIVE_SX
 
 # MUTEX_NOINLINE forces mutex operations to call functions to perform each
 # operation rather than inlining the simple cases.  This can be used to
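
As the new NOTES entry above says, adaptive spinning for sx locks is now
the default, so a kernel configuration only needs an options line when it
wants to turn the behavior off.  A minimal sketch of such a config (the
ident name is made up for illustration):

	include		GENERIC
	ident		NOADAPTIVESX
	options 	NO_ADAPTIVE_SX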

Modified: head/sys/conf/options
==============================================================================
--- head/sys/conf/options	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/conf/options	Fri May 29 01:49:27 2009	(r193011)
@@ -60,7 +60,6 @@ KDB_UNATTENDED	opt_kdb.h
 SYSCTL_DEBUG	opt_sysctl.h
 
 # Miscellaneous options.
-ADAPTIVE_SX
 ALQ
 AUDIT		opt_global.h
 CODA_COMPAT_5	opt_coda.h
@@ -134,6 +133,7 @@ MPROF_BUFFERS	opt_mprof.h
 MPROF_HASH_SIZE	opt_mprof.h
 NO_ADAPTIVE_MUTEXES	opt_adaptive_mutexes.h
 NO_ADAPTIVE_RWLOCKS
+NO_ADAPTIVE_SX
 NO_SYSCTL_DESCR	opt_global.h
 NSWBUF_MIN	opt_swap.h
 MBUF_PACKET_ZONE_DISABLE	opt_global.h

Modified: head/sys/kern/kern_sx.c
==============================================================================
--- head/sys/kern/kern_sx.c	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/kern/kern_sx.c	Fri May 29 01:49:27 2009	(r193011)
@@ -60,12 +60,12 @@ __FBSDID("$FreeBSD$");
 #include <ddb/ddb.h>
 #endif
 
-#if !defined(SMP) && defined(ADAPTIVE_SX)
-#error "You must have SMP to enable the ADAPTIVE_SX option"
+#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
+#define	ADAPTIVE_SX
 #endif
 
-CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
-    (SX_ADAPTIVESPIN | SX_RECURSE));
+CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
+    (SX_NOADAPTIVE | SX_RECURSE));
 
 /* Handy macros for sleep queues. */
 #define	SQ_EXCLUSIVE_QUEUE	0
@@ -133,6 +133,14 @@ struct lock_class lock_class_sx = {
 #define	_sx_assert(sx, what, file, line)
 #endif
 
+#ifdef ADAPTIVE_SX
+static u_int asx_retries = 10;
+static u_int asx_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
+SYSCTL_INT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
+SYSCTL_INT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
+#endif
+
 void
 assert_sx(struct lock_object *lock, int what)
 {
@@ -195,7 +203,7 @@ sx_init_flags(struct sx *sx, const char 
 	int flags;
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
-	    SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
+	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
 
 	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
@@ -207,7 +215,7 @@ sx_init_flags(struct sx *sx, const char 
 	if (opts & SX_QUIET)
 		flags |= LO_QUIET;
 
-	flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
+	flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
 	sx->sx_lock = SX_LOCK_UNLOCKED;
 	sx->sx_recurse = 0;
 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
@@ -453,6 +461,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 	GIANT_DECLARE;
 #ifdef ADAPTIVE_SX
 	volatile struct thread *owner;
+	u_int i, spintries = 0;
 #endif
 	uintptr_t x;
 #ifdef LOCK_PROFILING
@@ -495,24 +504,44 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 		 * running or the state of the lock changes.
 		 */
 		x = sx->sx_lock;
-		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
-			x = SX_OWNER(x);
-			owner = (struct thread *)x;
-			if (TD_IS_RUNNING(owner)) {
-				if (LOCK_LOG_TEST(&sx->lock_object, 0))
-					CTR3(KTR_LOCK,
+		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) != 0) {
+			if ((x & SX_LOCK_SHARED) == 0) {
+				x = SX_OWNER(x);
+				owner = (struct thread *)x;
+				if (TD_IS_RUNNING(owner)) {
+					if (LOCK_LOG_TEST(&sx->lock_object, 0))
+						CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
-					    __func__, sx, owner);
-				GIANT_SAVE();
-				while (SX_OWNER(sx->sx_lock) == x &&
-				    TD_IS_RUNNING(owner)) {
+						    __func__, sx, owner);
+					GIANT_SAVE();
+					while (SX_OWNER(sx->sx_lock) == x &&
+					    TD_IS_RUNNING(owner)) {
+						cpu_spinwait();
+#ifdef KDTRACE_HOOKS
+						spin_cnt++;
+#endif
+					}
+					continue;
+				}
+			} else if (SX_SHARERS(x) && spintries < asx_retries) {
+				spintries++;
+				for (i = 0; i < asx_loops; i++) {
+					if (LOCK_LOG_TEST(&sx->lock_object, 0))
+						CTR4(KTR_LOCK,
+				    "%s: shared spinning on %p with %u and %u",
+						    __func__, sx, spintries, i);
+					GIANT_SAVE();
+					x = sx->sx_lock;
+					if ((x & SX_LOCK_SHARED) == 0 ||
+					    SX_SHARERS(x) == 0)
+						break;
 					cpu_spinwait();
 #ifdef KDTRACE_HOOKS
 					spin_cnt++;
 #endif
 				}
-				continue;
+				if (i != asx_loops)
+					continue;
 			}
 		}
 #endif
@@ -538,7 +567,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 		 * again.
 		 */
 		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
@@ -752,7 +781,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
+		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
 			x = SX_OWNER(x);
 			owner = (struct thread *)x;
 			if (TD_IS_RUNNING(owner)) {
@@ -796,7 +825,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
 		 * changes.
 		 */
 		if (!(x & SX_LOCK_SHARED) &&
-		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
+		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
 			owner = (struct thread *)SX_OWNER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&sx->lock_object);
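
The shared-owner spinning added to _sx_xlock_hard() above is bounded by the
two new sysctl-tunable limits, debug.sx.retries and debug.sx.loops
(asx_retries and asx_loops in the code).  The following userland sketch
illustrates only that two-level bound; the lock word, the shared_spin()
name and the sched_yield() stand-in for cpu_spinwait() are illustrative
assumptions, not the kernel implementation:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>

	static unsigned int asx_retries = 10;	/* cf. debug.sx.retries */
	static unsigned int asx_loops = 10000;	/* cf. debug.sx.loops */

	/*
	 * Spin while the hypothetical lock word still shows shared owners,
	 * for at most asx_retries rounds of asx_loops iterations each.
	 * Returns true if the waiter should retry the fast path and false
	 * if it should give up and sleep.
	 */
	static bool
	shared_spin(atomic_uint *lock_word)
	{
		unsigned int i, spintries;

		for (spintries = 0; spintries < asx_retries; spintries++) {
			for (i = 0; i < asx_loops; i++) {
				if (atomic_load(lock_word) == 0)
					return (true);	/* sharers drained */
				sched_yield();	/* stand-in for cpu_spinwait() */
			}
		}
		return (false);			/* limits exhausted */
	}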

Modified: head/sys/sys/param.h
==============================================================================
--- head/sys/sys/param.h	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/sys/param.h	Fri May 29 01:49:27 2009	(r193011)
@@ -57,7 +57,7 @@
  *		is created, otherwise 1.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 800091	/* Master, propagated to newvers */
+#define __FreeBSD_version 800092	/* Master, propagated to newvers */
 
 #ifndef LOCORE
 #include <sys/types.h>

Modified: head/sys/sys/sx.h
==============================================================================
--- head/sys/sys/sx.h	Fri May 29 01:31:18 2009	(r193010)
+++ head/sys/sys/sx.h	Fri May 29 01:49:27 2009	(r193011)
@@ -265,7 +265,7 @@ __sx_sunlock(struct sx *sx, const char *
 #define	SX_NOPROFILE		0x02
 #define	SX_NOWITNESS		0x04
 #define	SX_QUIET		0x08
-#define	SX_ADAPTIVESPIN		0x10
+#define	SX_NOADAPTIVE		0x10
 #define	SX_RECURSE		0x20
 
 /*


