Date:      Mon, 8 Feb 2010 14:08:52 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-8@freebsd.org
Subject:   svn commit: r203662 - stable/8/sys/kern
Message-ID:  <201002081408.o18E8qfK064378@svn.freebsd.org>

Author: attilio
Date: Mon Feb  8 14:08:52 2010
New Revision: 203662
URL: http://svn.freebsd.org/changeset/base/203662

Log:
  MFC r202889, r202940:
  - Fix a race in sched_switch() of sched_4bsd: block td_lock when
    explicitly acquiring sched_lock, in order to prevent races with other
    td_lock contenders.
  - Merge ULE's internal function thread_block_switch() into the global
    thread_lock_block() and make the former's semantics the default for
    thread_lock_block().
  - Split out an invariant in order to have better checks.

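For context on the merged primitive: thread_lock_block() parks td->td_lock on
the blocked_lock sentinel and releases the real lock, while
thread_lock_unblock() publishes the new protecting lock with a release store
(see the kern_mutex.c hunk below, which also stops entering a spin section in
these primitives).  The following is a minimal userspace model of that
handoff, not the kernel API: the model_* names are made up, and pthread
mutexes stand in for spin mutexes.

/*
 * Userspace model of the td_lock handoff (hypothetical model_* names).
 * Blocking parks the thread's lock pointer on a sentinel so contenders
 * keep spinning; unblocking publishes the new lock with a release store.
 */
#include <pthread.h>
#include <stdatomic.h>

struct model_thread {
	_Atomic(pthread_mutex_t *) td_lock;	/* which lock protects this thread */
};

/* Sentinel: never handed to waiters, only used as a marker. */
static pthread_mutex_t model_blocked_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analog of thread_lock_block(): caller must hold *td_lock on entry. */
static pthread_mutex_t *
model_lock_block(struct model_thread *td)
{
	pthread_mutex_t *lock = atomic_load(&td->td_lock);

	atomic_store(&td->td_lock, &model_blocked_lock);
	pthread_mutex_unlock(lock);
	return (lock);
}

/* Analog of thread_lock_unblock(): publish the new protecting lock. */
static void
model_lock_unblock(struct model_thread *td, pthread_mutex_t *new)
{
	atomic_store_explicit(&td->td_lock, new, memory_order_release);
}

/* Analog of thread_lock(): retry until the pointer we locked is current. */
static void
model_thread_lock(struct model_thread *td)
{
	pthread_mutex_t *lock;

	for (;;) {
		lock = atomic_load_explicit(&td->td_lock, memory_order_acquire);
		if (lock == &model_blocked_lock)
			continue;		/* handoff in progress; keep spinning */
		pthread_mutex_lock(lock);
		if (atomic_load(&td->td_lock) == lock)
			return;			/* pointer still current: we own the thread */
		pthread_mutex_unlock(lock);	/* raced with a handoff; retry */
	}
}
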
Modified:
  stable/8/sys/kern/kern_mutex.c
  stable/8/sys/kern/sched_4bsd.c
  stable/8/sys/kern/sched_ule.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/amd64/include/xen/   (props changed)
  stable/8/sys/cddl/contrib/opensolaris/   (props changed)
  stable/8/sys/contrib/dev/acpica/   (props changed)
  stable/8/sys/contrib/pf/   (props changed)
  stable/8/sys/dev/xen/xenpci/   (props changed)

Modified: stable/8/sys/kern/kern_mutex.c
==============================================================================
--- stable/8/sys/kern/kern_mutex.c	Mon Feb  8 14:04:32 2010	(r203661)
+++ stable/8/sys/kern/kern_mutex.c	Mon Feb  8 14:08:52 2010	(r203662)
@@ -616,7 +616,6 @@ thread_lock_block(struct thread *td)
 {
 	struct mtx *lock;
 
-	spinlock_enter();
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	lock = td->td_lock;
 	td->td_lock = &blocked_lock;
@@ -631,7 +630,6 @@ thread_lock_unblock(struct thread *td, s
 	mtx_assert(new, MA_OWNED);
 	MPASS(td->td_lock == &blocked_lock);
 	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
-	spinlock_exit();
 }
 
 void

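Note the effect of the hunk above: thread_lock_block() and
thread_lock_unblock() no longer enter and exit a spin section themselves, so
a caller that still wants preemption disabled across the window must say so
explicitly.  ULE's sched_setcpu(), further down in this patch, becomes the
canonical example; condensed from that hunk, the caller-side pattern is now:

	spinlock_enter();			/* caller owns the critical section */
	thread_lock_block(td);			/* td_lock parked on blocked_lock */
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	spinlock_exit();
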
Modified: stable/8/sys/kern/sched_4bsd.c
==============================================================================
--- stable/8/sys/kern/sched_4bsd.c	Mon Feb  8 14:04:32 2010	(r203661)
+++ stable/8/sys/kern/sched_4bsd.c	Mon Feb  8 14:08:52 2010	(r203662)
@@ -920,9 +920,11 @@ sched_sleep(struct thread *td, int pri)
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
 
+	tmtx = NULL;
 	ts = td->td_sched;
 	p = td->td_proc;
 
@@ -931,17 +933,20 @@ sched_switch(struct thread *td, struct t
 	/* 
 	 * Switch to the sched lock to fix things up and pick
 	 * a new thread.
+	 * Block the td_lock in order to avoid breaking the critical path.
 	 */
 	if (td->td_lock != &sched_lock) {
 		mtx_lock_spin(&sched_lock);
-		thread_unlock(td);
+		tmtx = thread_lock_block(td);
 	}
 
 	if ((p->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
 
-	if (newtd)
+	if (newtd) {
+		MPASS(newtd->td_lock == &sched_lock);
 		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
+	}
 
 	td->td_lastcpu = td->td_oncpu;
 	td->td_flags &= ~TDF_NEEDRESCHED;
@@ -984,8 +989,8 @@ sched_switch(struct thread *td, struct t
 			sched_load_add();
 	} else {
 		newtd = choosethread();
+		MPASS(newtd->td_lock == &sched_lock);
 	}
-	MPASS(newtd->td_lock == &sched_lock);
 
 	if (td != newtd) {
 #ifdef	HWPMC_HOOKS
@@ -1004,7 +1009,7 @@ sched_switch(struct thread *td, struct t
 			(*dtrace_vtime_switch_func)(newtd);
 #endif
 
-		cpu_switch(td, newtd, td->td_lock);
+		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 		lock_profile_obtain_lock_success(&sched_lock.lock_object,
 		    0, 0, __FILE__, __LINE__);
 		/*

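The race closed in sched_4bsd's sched_switch(): the old code released td_lock
outright with thread_unlock() before the context switch, so another td_lock
contender could acquire the thread's original lock and act on a thread that
was still in the middle of being switched out on this CPU.  The new code
parks td_lock on blocked_lock instead and hands the original lock pointer to
cpu_switch(), which is expected to store it back into td_lock only once the
switch has completed, at which point the spinning contenders are released.
Condensed from the hunks above:

	/* Old ordering (racy): td_lock fully released before the switch. */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);		/* contenders may take td_lock now */
	}
	/* ... */
	cpu_switch(td, newtd, td->td_lock);

	/* New ordering: contenders spin on blocked_lock until cpu_switch()
	 * publishes the original lock back into td_lock. */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		tmtx = thread_lock_block(td);
	}
	/* ... */
	cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
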
Modified: stable/8/sys/kern/sched_ule.c
==============================================================================
--- stable/8/sys/kern/sched_ule.c	Mon Feb  8 14:04:32 2010	(r203661)
+++ stable/8/sys/kern/sched_ule.c	Mon Feb  8 14:08:52 2010	(r203662)
@@ -301,7 +301,6 @@ static int sched_pickcpu(struct thread *
 static void sched_balance(void);
 static int sched_balance_pair(struct tdq *, struct tdq *);
 static inline struct tdq *sched_setcpu(struct thread *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
 static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
@@ -1106,9 +1105,11 @@ sched_setcpu(struct thread *td, int cpu,
 	 * The hard case, migration, we need to block the thread first to
 	 * prevent order reversals with other cpus locks.
 	 */
+	spinlock_enter();
 	thread_lock_block(td);
 	TDQ_LOCK(tdq);
 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+	spinlock_exit();
 	return (tdq);
 }
 
@@ -1715,23 +1716,6 @@ sched_unlend_user_prio(struct thread *td
 }
 
 /*
- * Block a thread for switching.  Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
-	struct mtx *lock;
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = td->td_lock;
-	td->td_lock = &blocked_lock;
-	mtx_unlock_spin(lock);
-
-	return (lock);
-}
-
-/*
  * Handle migration from sched_switch().  This happens only for
  * cpu binding.
  */
@@ -1749,7 +1733,7 @@ sched_switch_migrate(struct tdq *tdq, st
 	 * not holding either run-queue lock.
 	 */
 	spinlock_enter();
-	thread_block_switch(td);	/* This releases the lock on tdq. */
+	thread_lock_block(td);	/* This releases the lock on tdq. */
 
 	/*
 	 * Acquire both run-queue locks before placing the thread on the new
@@ -1769,7 +1753,8 @@ sched_switch_migrate(struct tdq *tdq, st
 }
 
 /*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
  */
 static inline void
 thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1825,7 +1810,7 @@ sched_switch(struct thread *td, struct t
 	} else {
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
-		mtx = thread_block_switch(td);
+		mtx = thread_lock_block(td);
 		tdq_load_rem(tdq, td);
 	}
 	/*


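One reason thread_unblock_switch() survives the merge while
thread_block_switch() does not: as its reworded comment above notes, it does
not assume td_lock is currently blocked, whereas thread_lock_unblock()
asserts exactly that.  Side by side (the first fragment is from the
kern_mutex.c hunk above; the second is a sketch of the existing ULE helper,
which this diff leaves in place):

	/* thread_lock_unblock(): insists the lock was actually blocked. */
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);

	/* thread_unblock_switch() (sketch): only publishes the pointer, so
	 * it also works when td_lock was never switched to &blocked_lock. */
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)mtx);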
