Skip site navigation (1)Skip section navigation (2)
Date:      Fri, 27 Feb 2004 11:04:34 -0800 (PST)
From:      John Baldwin <jhb@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 47757 for review
Message-ID:  <200402271904.i1RJ4Yf3033493@repoman.freebsd.org>

next in thread | raw e-mail | index | archive | help
http://perforce.freebsd.org/chv.cgi?CH=47757

Change 47757 by jhb@jhb_slimer on 2004/02/27 11:03:35

	IFC @47756.  Loop back sleep queues merge.

Affected files ...

.. //depot/projects/smpng/sys/conf/files#104 integrate
.. //depot/projects/smpng/sys/ddb/db_ps.c#24 integrate
.. //depot/projects/smpng/sys/kern/kern_condvar.c#33 integrate
.. //depot/projects/smpng/sys/kern/kern_exit.c#72 integrate
.. //depot/projects/smpng/sys/kern/kern_sig.c#87 integrate
.. //depot/projects/smpng/sys/kern/kern_synch.c#65 integrate
.. //depot/projects/smpng/sys/kern/kern_thread.c#51 integrate
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#23 integrate
.. //depot/projects/smpng/sys/kern/sched_ule.c#26 integrate
.. //depot/projects/smpng/sys/kern/subr_sleepqueue.c#1 branch
.. //depot/projects/smpng/sys/kern/subr_turnstile.c#8 integrate
.. //depot/projects/smpng/sys/kern/subr_witness.c#113 integrate
.. //depot/projects/smpng/sys/kern/sys_generic.c#31 integrate
.. //depot/projects/smpng/sys/kern/vfs_subr.c#69 integrate
.. //depot/projects/smpng/sys/sys/condvar.h#6 integrate
.. //depot/projects/smpng/sys/sys/proc.h#112 integrate
.. //depot/projects/smpng/sys/sys/sched.h#9 integrate
.. //depot/projects/smpng/sys/sys/sleepqueue.h#1 branch
.. //depot/projects/smpng/sys/sys/systm.h#49 integrate

Differences ...

==== //depot/projects/smpng/sys/conf/files#104 (text+ko) ====

@@ -1,4 +1,4 @@
-# $FreeBSD: src/sys/conf/files,v 1.865 2004/02/26 03:53:52 mlaier Exp $
+# $FreeBSD: src/sys/conf/files,v 1.866 2004/02/27 18:52:42 jhb Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -1151,6 +1151,7 @@
 kern/subr_rman.c	standard
 kern/subr_sbuf.c	standard
 kern/subr_scanf.c	standard
+kern/subr_sleepqueue.c	standard
 kern/subr_smp.c		standard
 kern/subr_taskqueue.c	standard
 kern/subr_trap.c	standard

==== //depot/projects/smpng/sys/ddb/db_ps.c#24 (text+ko) ====

@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/ddb/db_ps.c,v 1.49 2003/08/30 19:06:57 phk Exp $");
+__FBSDID("$FreeBSD: src/sys/ddb/db_ps.c,v 1.50 2004/02/27 18:52:42 jhb Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -126,20 +126,8 @@
 
 	if (p->p_flag & P_SA) 
 		db_printf( "   thread %p ksegrp %p ", td, td->td_ksegrp);
-	if (TD_ON_SLEEPQ(td)) {
-		if (td->td_flags & TDF_CVWAITQ)
-			if (TD_IS_SLEEPING(td))
-				db_printf("[CV]");
-			else
-				db_printf("[CVQ");
-		else
-			if (TD_IS_SLEEPING(td))
-				db_printf("[SLP]");
-			else
-				db_printf("[SLPQ");
-		db_printf("%s %p]", td->td_wmesg,
-		    (void *)td->td_wchan);
-	}
+	if (TD_ON_SLEEPQ(td))
+		db_printf("[SLPQ %s %p]", td->td_wmesg, (void *)td->td_wchan);
 	switch (td->td_state) {
 	case TDS_INHIBITED:
 		if (TD_ON_LOCK(td)) {
@@ -147,11 +135,9 @@
 			    td->td_lockname,
 			    (void *)td->td_blocked);
 		}
-#if 0 /* covered above */
 		if (TD_IS_SLEEPING(td)) {
 			db_printf("[SLP]");
 		}  
-#endif
 		if (TD_IS_SWAPPED(td)) {
 			db_printf("[SWAP]");
 		}

==== //depot/projects/smpng/sys/kern/kern_condvar.c#33 (text+ko) ====

@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_condvar.c,v 1.45 2004/01/25 03:54:52 jeff Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_condvar.c,v 1.46 2004/02/27 18:52:43 jhb Exp $");
 
 #include "opt_ktrace.h"
 
@@ -39,6 +39,7 @@
 #include <sys/condvar.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
 #include <sys/resourcevar.h>
 #ifdef KTRACE
 #include <sys/uio.h>
@@ -56,35 +57,6 @@
 	mtx_assert((mp), MA_OWNED | MA_NOTRECURSED);			\
 } while (0)
 
-#ifdef INVARIANTS
-#define	CV_WAIT_VALIDATE(cvp, mp) do {					\
-	if (TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
-		/* Only waiter. */					\
-		(cvp)->cv_mtx = (mp);					\
-	} else {							\
-		/*							\
-		 * Other waiter; assert that we're using the		\
-		 * same mutex.						\
-		 */							\
-		KASSERT((cvp)->cv_mtx == (mp),				\
-		    ("%s: Multiple mutexes", __func__));		\
-	}								\
-} while (0)
-
-#define	CV_SIGNAL_VALIDATE(cvp) do {					\
-	if (!TAILQ_EMPTY(&(cvp)->cv_waitq)) {				\
-		KASSERT(mtx_owned((cvp)->cv_mtx),			\
-		    ("%s: Mutex not owned", __func__));			\
-	}								\
-} while (0)
-
-#else
-#define	CV_WAIT_VALIDATE(cvp, mp)
-#define	CV_SIGNAL_VALIDATE(cvp)
-#endif
-
-static void cv_timedwait_end(void *arg);
-
 /*
  * Initialize a condition variable.  Must be called before use.
  */
@@ -92,8 +64,6 @@
 cv_init(struct cv *cvp, const char *desc)
 {
 
-	TAILQ_INIT(&cvp->cv_waitq);
-	cvp->cv_mtx = NULL;
 	cvp->cv_description = desc;
 }
 
@@ -104,85 +74,16 @@
 void
 cv_destroy(struct cv *cvp)
 {
+#ifdef INVARIANTS
+	struct sleepqueue *sq;	
 
-	KASSERT(cv_waitq_empty(cvp), ("%s: cv_waitq non-empty", __func__));
-}
-
-/*
- * Common code for cv_wait* functions.  All require sched_lock.
- */
-
-/*
- * Switch context.
- */
-static __inline void
-cv_switch(struct thread *td)
-{
-	TD_SET_SLEEPING(td);
-	mi_switch(SW_VOL);
-	CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
-	    td->td_proc->p_pid, td->td_proc->p_comm);
-}
-
-/*
- * Switch context, catching signals.
- */
-static __inline int
-cv_switch_catch(struct thread *td)
-{
-	struct proc *p;
-	int sig;
-
-	/*
-	 * We put ourselves on the sleep queue and start our timeout before
-	 * calling cursig, as we could stop there, and a wakeup or a SIGCONT (or
-	 * both) could occur while we were stopped.  A SIGCONT would cause us to
-	 * be marked as TDS_SLP without resuming us, thus we must be ready for
-	 * sleep when cursig is called.  If the wakeup happens while we're
-	 * stopped, td->td_wchan will be 0 upon return from cursig,
-	 * and TD_ON_SLEEPQ() will return false.
-	 */
-	td->td_flags |= TDF_SINTR;
-	mtx_unlock_spin(&sched_lock);
-	p = td->td_proc;
-	PROC_LOCK(p);
-	mtx_lock(&p->p_sigacts->ps_mtx);
-	sig = cursig(td);
-	mtx_unlock(&p->p_sigacts->ps_mtx);
-	if (thread_suspend_check(1))
-		sig = SIGSTOP;
-	mtx_lock_spin(&sched_lock);
-	PROC_UNLOCK(p);
-	if (sig != 0) {
-		if (TD_ON_SLEEPQ(td))
-			cv_waitq_remove(td);
-		TD_SET_RUNNING(td);
-	} else if (TD_ON_SLEEPQ(td)) {
-		cv_switch(td);
-	}
-	td->td_flags &= ~TDF_SINTR;
-
-	return sig;
+	sq = sleepq_lookup(cvp);
+	sleepq_release(cvp);
+	KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
+#endif
 }
 
 /*
- * Add a thread to the wait queue of a condition variable.
- */
-static __inline void
-cv_waitq_add(struct cv *cvp, struct thread *td)
-{
-
-	td->td_flags |= TDF_CVWAITQ;
-	TD_SET_ON_SLEEPQ(td);
-	td->td_wchan = cvp;
-	td->td_wmesg = cvp->cv_description;
-	CTR3(KTR_PROC, "cv_waitq_add: thread %p (pid %d, %s)", td,
-	    td->td_proc->p_pid, td->td_proc->p_comm);
-	TAILQ_INSERT_TAIL(&cvp->cv_waitq, td, td_slpq);
-	sched_sleep(td, td->td_priority);
-}
-
-/*
  * Wait on a condition variable.  The current thread is placed on the condition
  * variable's wait queue and suspended.  A cv_signal or cv_broadcast on the same
  * condition variable will resume the thread.  The mutex is released before
@@ -192,6 +93,7 @@
 void
 cv_wait(struct cv *cvp, struct mtx *mp)
 {
+	struct sleepqueue *sq;
 	struct thread *td;
 	WITNESS_SAVE_DECL(mp);
 
@@ -205,7 +107,7 @@
 	    "Waiting on \"%s\"", cvp->cv_description);
 	WITNESS_SAVE(&mp->mtx_object, mp);
 
-	if (cold ) {
+	if (cold || panicstr) {
 		/*
 		 * During autoconfiguration, just give interrupts
 		 * a chance, then just return.  Don't run any other
@@ -215,17 +117,14 @@
 		return;
 	}
 
-	mtx_lock_spin(&sched_lock);
-
-	CV_WAIT_VALIDATE(cvp, mp);
+	sq = sleepq_lookup(cvp);
 
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	cv_waitq_add(cvp, td);
-	cv_switch(td);
+	sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sleepq_wait(cvp);
 
-	mtx_unlock_spin(&sched_lock);
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_CSW))
 		ktrcsw(0, 0);
@@ -244,10 +143,10 @@
 int
 cv_wait_sig(struct cv *cvp, struct mtx *mp)
 {
+	struct sleepqueue *sq;
 	struct thread *td;
 	struct proc *p;
-	int rval;
-	int sig;
+	int rval, sig;
 	WITNESS_SAVE_DECL(mp);
 
 	td = curthread;
@@ -272,32 +171,25 @@
 		return 0;
 	}
 
-	mtx_lock_spin(&sched_lock);
+	sq = sleepq_lookup(cvp);
 
-	CV_WAIT_VALIDATE(cvp, mp);
+	/* XXX: Missing the threading checks from msleep! */
 
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	cv_waitq_add(cvp, td);
-	sig = cv_switch_catch(td);
+	sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sig = sleepq_catch_signals(cvp);
+	/*
+	 * XXX: Missing magic return value handling for no signal
+	 * caught but thread woken up during check.
+	 */
+	rval = sleepq_wait_sig(cvp);
+	if (rval == 0)
+		rval = sleepq_calc_signal_retval(sig);
 
-	mtx_unlock_spin(&sched_lock);
-
+	/* XXX: Part of missing threading checks? */
 	PROC_LOCK(p);
-	mtx_lock(&p->p_sigacts->ps_mtx);
-	if (sig == 0) {
-		sig = cursig(td);	/* XXXKSE */
-		if (sig == 0 && td->td_flags & TDF_INTERRUPT)
-			rval = td->td_intrval;
-	}
-	if (sig != 0) {
-		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
-			rval = EINTR;
-		else
-			rval = ERESTART;
-	}
-	mtx_unlock(&p->p_sigacts->ps_mtx);
 	if (p->p_flag & P_WEXIT)
 		rval = EINTR;
 	PROC_UNLOCK(p);
@@ -321,6 +213,7 @@
 int
 cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
 {
+	struct sleepqueue *sq;
 	struct thread *td;
 	int rval;
 	WITNESS_SAVE_DECL(mp);
@@ -346,34 +239,15 @@
 		return 0;
 	}
 
-	mtx_lock_spin(&sched_lock);
-
-	CV_WAIT_VALIDATE(cvp, mp);
+	sq = sleepq_lookup(cvp);
 
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	cv_waitq_add(cvp, td);
-	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
-	cv_switch(td);
-
-	if (td->td_flags & TDF_TIMEOUT) {
-		td->td_flags &= ~TDF_TIMEOUT;
-		rval = EWOULDBLOCK;
-	} else if (td->td_flags & TDF_TIMOFAIL)
-		td->td_flags &= ~TDF_TIMOFAIL;
-	else if (callout_stop(&td->td_slpcallout) == 0) {
-		/*
-		 * Work around race with cv_timedwait_end similar to that
-		 * between msleep and endtsleep.
-		 * Go back to sleep.
-		 */
-		TD_SET_SLEEPING(td);
-		mi_switch(SW_INVOL);
-		td->td_flags &= ~TDF_TIMOFAIL;
-	}
+	sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sleepq_set_timeout(sq, cvp, timo);
+	rval = sleepq_timedwait(cvp, 0);
 
-	mtx_unlock_spin(&sched_lock);
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_CSW))
 		ktrcsw(0, 0);
@@ -394,6 +268,7 @@
 int
 cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
 {
+	struct sleepqueue *sq;
 	struct thread *td;
 	struct proc *p;
 	int rval;
@@ -422,48 +297,24 @@
 		return 0;
 	}
 
-	mtx_lock_spin(&sched_lock);
+	sq = sleepq_lookup(cvp);
 
-	CV_WAIT_VALIDATE(cvp, mp);
-
 	DROP_GIANT();
 	mtx_unlock(mp);
 
-	cv_waitq_add(cvp, td);
-	callout_reset(&td->td_slpcallout, timo, cv_timedwait_end, td);
-	sig = cv_switch_catch(td);
+	sleepq_add(sq, cvp, mp, cvp->cv_description, SLEEPQ_CONDVAR);
+	sleepq_set_timeout(sq, cvp, timo);
+	sig = sleepq_catch_signals(cvp);
+	/*
+	 * XXX: Missing magic return value handling for no signal
+	 * caught but thread woken up during check.
+	 */
+	rval = sleepq_timedwait_sig(cvp, sig != 0);
+	if (rval == 0)
+		rval = sleepq_calc_signal_retval(sig);
 
-	if (td->td_flags & TDF_TIMEOUT) {
-		td->td_flags &= ~TDF_TIMEOUT;
-		rval = EWOULDBLOCK;
-	} else if (td->td_flags & TDF_TIMOFAIL)
-		td->td_flags &= ~TDF_TIMOFAIL;
-	else if (callout_stop(&td->td_slpcallout) == 0) {
-		/*
-		 * Work around race with cv_timedwait_end similar to that
-		 * between msleep and endtsleep.
-		 * Go back to sleep.
-		 */
-		TD_SET_SLEEPING(td);
-		mi_switch(SW_INVOL);
-		td->td_flags &= ~TDF_TIMOFAIL;
-	}
-	mtx_unlock_spin(&sched_lock);
-
+	/* XXX: Part of missing threading checks? */
 	PROC_LOCK(p);
-	mtx_lock(&p->p_sigacts->ps_mtx);
-	if (sig == 0) {
-		sig = cursig(td);
-		if (sig == 0 && td->td_flags & TDF_INTERRUPT)
-			rval = td->td_intrval;
-	}
-	if (sig != 0) {
-		if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
-			rval = EINTR;
-		else
-			rval = ERESTART;
-	}
-	mtx_unlock(&p->p_sigacts->ps_mtx);
 	if (p->p_flag & P_WEXIT)
 		rval = EINTR;
 	PROC_UNLOCK(p);
@@ -480,24 +331,6 @@
 }
 
 /*
- * Common code for signal and broadcast.  Assumes waitq is not empty.  Must be
- * called with sched_lock held.
- */
-static __inline void
-cv_wakeup(struct cv *cvp)
-{
-	struct thread *td;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	td = TAILQ_FIRST(&cvp->cv_waitq);
-	KASSERT(td->td_wchan == cvp, ("%s: bogus wchan", __func__));
-	KASSERT(td->td_flags & TDF_CVWAITQ, ("%s: not on waitq", __func__));
-	cv_waitq_remove(td);
-	TD_CLR_SLEEPING(td);
-	setrunnable(td);
-}
-
-/*
  * Signal a condition variable, wakes up one waiting thread.  Will also wakeup
  * the swapper if the process is not in memory, so that it can bring the
  * sleeping process in.  Note that this may also result in additional threads
@@ -508,13 +341,7 @@
 cv_signal(struct cv *cvp)
 {
 
-	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
-	mtx_lock_spin(&sched_lock);
-	if (!TAILQ_EMPTY(&cvp->cv_waitq)) {
-		CV_SIGNAL_VALIDATE(cvp);
-		cv_wakeup(cvp);
-	}
-	mtx_unlock_spin(&sched_lock);
+	sleepq_signal(cvp, SLEEPQ_CONDVAR, -1);
 }
 
 /*
@@ -524,82 +351,6 @@
 void
 cv_broadcastpri(struct cv *cvp, int pri)
 {
-	struct thread	*td;
 
-	KASSERT(cvp != NULL, ("%s: cvp NULL", __func__));
-	mtx_lock_spin(&sched_lock);
-	CV_SIGNAL_VALIDATE(cvp);
-	while (!TAILQ_EMPTY(&cvp->cv_waitq)) {
-		if (pri >= PRI_MIN && pri <= PRI_MAX) {
-			td = TAILQ_FIRST(&cvp->cv_waitq);
-			if (td->td_priority > pri)
-				td->td_priority = pri;
-		}
-		cv_wakeup(cvp);
-	}
-	mtx_unlock_spin(&sched_lock);
+	sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri);
 }
-
-/*
- * Remove a thread from the wait queue of its condition variable.  This may be
- * called externally.
- */
-void
-cv_waitq_remove(struct thread *td)
-{
-	struct cv *cvp;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-	if ((cvp = td->td_wchan) != NULL && td->td_flags & TDF_CVWAITQ) {
-		TAILQ_REMOVE(&cvp->cv_waitq, td, td_slpq);
-		td->td_flags &= ~TDF_CVWAITQ;
-		td->td_wmesg = NULL;
-		TD_CLR_ON_SLEEPQ(td);
-	}
-}
-
-/*
- * Timeout function for cv_timedwait.  Put the thread on the runqueue and set
- * its timeout flag.
- */
-static void
-cv_timedwait_end(void *arg)
-{
-	struct thread *td;
-
-	td = arg;
-	CTR3(KTR_PROC, "cv_timedwait_end: thread %p (pid %d, %s)",
-	    td, td->td_proc->p_pid, td->td_proc->p_comm);
-	mtx_lock_spin(&sched_lock);
-	if (TD_ON_SLEEPQ(td)) {
-		cv_waitq_remove(td);
-		td->td_flags |= TDF_TIMEOUT;
-	} else {
-		td->td_flags |= TDF_TIMOFAIL;
-	}
-	TD_CLR_SLEEPING(td);
-	setrunnable(td);
-	mtx_unlock_spin(&sched_lock);
-}
-
-/*
- * For now only abort interruptable waits.
- * The others will have to either complete on their own or have a timeout.
- */
-void
-cv_abort(struct thread *td)
-{
-
-	CTR3(KTR_PROC, "cv_abort: thread %p (pid %d, %s)", td,
-	    td->td_proc->p_pid, td->td_proc->p_comm);
-	mtx_lock_spin(&sched_lock);
-	if ((td->td_flags & (TDF_SINTR|TDF_TIMEOUT)) == TDF_SINTR) {
-		if (TD_ON_SLEEPQ(td)) {
-			cv_waitq_remove(td);
-		}
-		TD_CLR_SLEEPING(td);
-		setrunnable(td);
-	}
-	mtx_unlock_spin(&sched_lock);
-}
-

==== //depot/projects/smpng/sys/kern/kern_exit.c#72 (text+ko) ====

@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_exit.c,v 1.222 2004/02/19 06:43:48 truckman Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_exit.c,v 1.223 2004/02/27 18:39:09 jhb Exp $");
 
 #include "opt_compat.h"
 #include "opt_ktrace.h"
@@ -494,21 +494,26 @@
 	PROC_LOCK(p);
 	PROC_LOCK(p->p_pptr);
 	sx_xunlock(&proctree_lock);
-	mtx_lock_spin(&sched_lock);
 
 	while (mtx_owned(&Giant))
 		mtx_unlock(&Giant);
 
 	/*
 	 * We have to wait until after acquiring all locks before
-	 * changing p_state.  If we block on a mutex then we will be
-	 * back at SRUN when we resume and our parent will never
-	 * harvest us.
 	 * changing p_state.  We need to avoid any possible context
+	 * switches while marked as a zombie including blocking on
+	 * a mutex.
 	 */
+	mtx_lock_spin(&sched_lock);
 	p->p_state = PRS_ZOMBIE;
+	critical_enter();
+	mtx_unlock_spin(&sched_lock);
 
 	wakeup(p->p_pptr);
 	PROC_UNLOCK(p->p_pptr);
+
+	mtx_lock_spin(&sched_lock);
+	critical_exit();
 	cnt.v_swtch++;
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);

==== //depot/projects/smpng/sys/kern/kern_sig.c#87 (text+ko) ====

@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_sig.c,v 1.270 2004/02/04 21:52:55 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_sig.c,v 1.271 2004/02/27 18:52:43 jhb Exp $");
 
 #include "opt_compat.h"
 #include "opt_ktrace.h"
@@ -63,6 +63,7 @@
 #include <sys/proc.h>
 #include <sys/pioctl.h>
 #include <sys/resourcevar.h>
+#include <sys/sleepqueue.h>
 #include <sys/smp.h>
 #include <sys/stat.h>
 #include <sys/sx.h>
@@ -1872,12 +1873,8 @@
 		 * It may run a bit until it hits a thread_suspend_check().
 		 */
 		mtx_lock_spin(&sched_lock);
-		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) {
-			if (td->td_flags & TDF_CVWAITQ)
-				cv_abort(td);
-			else
-				abortsleep(td);
-		}
+		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
+			sleepq_abort(td);
 		mtx_unlock_spin(&sched_lock);
 		goto out;
 		/*
@@ -1972,9 +1969,8 @@
 		 * be noticed when the process returns through
 		 * trap() or syscall().
 		 */
-		if ((td->td_flags & TDF_SINTR) == 0) {
+		if ((td->td_flags & TDF_SINTR) == 0)
 			return;
-		}
 		/*
 		 * Process is sleeping and traced.  Make it runnable
 		 * so it can discover the signal in issignal() and stop
@@ -2002,14 +1998,10 @@
 			/*
 			 * Raise priority to at least PUSER.
 			 */
-			if (td->td_priority > PUSER) {
+			if (td->td_priority > PUSER)
 				td->td_priority = PUSER;
-			}
 		}
-		if (td->td_flags & TDF_CVWAITQ) 
-			cv_abort(td);
-		else
-			abortsleep(td);
+		sleepq_abort(td);
 	}
 #ifdef SMP
 	  else {
@@ -2018,9 +2010,8 @@
 		 * other than kicking ourselves if we are running.
 		 * It will either never be noticed, or noticed very soon.
 		 */
-		if (TD_IS_RUNNING(td) && td != curthread) {
+		if (TD_IS_RUNNING(td) && td != curthread)
 			forward_signal(td);
-		}
 	  }
 #endif
 }

==== //depot/projects/smpng/sys/kern/kern_synch.c#65 (text+ko) ====

@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/kern_synch.c,v 1.242 2004/02/01 05:37:36 jeff Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/kern_synch.c,v 1.243 2004/02/27 18:52:43 jhb Exp $");
 
 #include "opt_ddb.h"
 #include "opt_ktrace.h"
@@ -55,6 +55,7 @@
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
 #include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
 #include <sys/smp.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
@@ -95,7 +96,6 @@
 static int      fscale __unused = FSCALE;
 SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
 
-static void	endtsleep(void *);
 static void	loadav(void *arg);
 static void	lboltcb(void *arg);
 
@@ -116,6 +116,7 @@
 	hogticks = (hz / 10) * 2;	/* Default only. */
 	for (i = 0; i < TABLESIZE; i++)
 		TAILQ_INIT(&slpque[i]);
+	init_sleepqueues();
 }
 
 /*
@@ -141,29 +142,59 @@
 	int priority, timo;
 	const char *wmesg;
 {
-	struct thread *td = curthread;
-	struct proc *p = td->td_proc;
-	int sig, catch = priority & PCATCH;
-	int rval = 0;
+	struct sleepqueue *sq;
+	struct thread *td;
+	struct proc *p;
+	int catch, rval, sig;
 	WITNESS_SAVE_DECL(mtx);
 
+	td = curthread;
+	p = td->td_proc;
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_CSW))
 		ktrcsw(1, 0);
 #endif
-	/* XXX: mtx == NULL ?? */
-	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mtx->mtx_object,
-	    "Sleeping on \"%s\"", wmesg);
+	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL :
+	    &mtx->mtx_object, "Sleeping on \"%s\"", wmesg);
 	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
 	    ("sleeping without a mutex"));
+	KASSERT(p != NULL, ("msleep1"));
+	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+
+	if (cold) {
+		/*
+		 * During autoconfiguration, just return;
+		 * don't run any other procs or panic below,
+		 * in case this is the idle process and already asleep.
+		 * XXX: this used to do "s = splhigh(); splx(safepri);
+		 * splx(s);" to give interrupts a chance, but there is
+		 * no way to give interrupts a chance now.
+		 */
+		if (mtx != NULL && priority & PDROP)
+			mtx_unlock(mtx);
+		return (0);
+	}
+	catch = priority & PCATCH;
+	rval = 0;
+
 	/*
+	 * If we are already on a sleep queue, then remove us from that
+	 * sleep queue first.  We have to do this to handle recursive
+	 * sleeps.
+	 */
+	if (TD_ON_SLEEPQ(td))
+		sleepq_remove(td, td->td_wchan);
+
+	sq = sleepq_lookup(ident);
+	mtx_lock_spin(&sched_lock);
+
+	/*
 	 * If we are capable of async syscalls and there isn't already
 	 * another one ready to return, start a new thread
 	 * and queue it as ready to run. Note that there is danger here
 	 * because we need to make sure that we don't sleep allocating
 	 * the thread (recursion here might be bad).
 	 */
-	mtx_lock_spin(&sched_lock);
 	if (p->p_flag & P_SA || p->p_numthreads > 1) {
 		/*
 		 * Just don't bother if we are exiting
@@ -173,28 +204,20 @@
 		if (catch) {
 			if ((p->p_flag & P_WEXIT) && p->p_singlethread != td) {
 				mtx_unlock_spin(&sched_lock);
+				sleepq_release(ident);
 				return (EINTR);
 			}
 			if (td->td_flags & TDF_INTERRUPT) {
 				mtx_unlock_spin(&sched_lock);
+				sleepq_release(ident);
 				return (td->td_intrval);
 			}
 		}
 	}
-	if (cold ) {
-		/*
-		 * During autoconfiguration, just return;
-		 * don't run any other procs or panic below,
-		 * in case this is the idle process and already asleep.
-		 * XXX: this used to do "s = splhigh(); splx(safepri);
-		 * splx(s);" to give interrupts a chance, but there is
-		 * no way to give interrupts a chance now.
-		 */
-		if (mtx != NULL && priority & PDROP)
-			mtx_unlock(mtx);
-		mtx_unlock_spin(&sched_lock);
-		return (0);
-	}
+	mtx_unlock_spin(&sched_lock);
+	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
+	    td, p->p_pid, p->p_comm, wmesg, ident);
+
 	DROP_GIANT();
 	if (mtx != NULL) {
 		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
@@ -203,101 +226,55 @@
 		if (priority & PDROP)
 			mtx = NULL;
 	}
-	KASSERT(p != NULL, ("msleep1"));
-	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
-
-	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
-	    td, p->p_pid, p->p_comm, wmesg, ident);
 
-	td->td_wchan = ident;
-	td->td_wmesg = wmesg;
-	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
-	TD_SET_ON_SLEEPQ(td);
-	if (timo)
-		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
 	/*
 	 * We put ourselves on the sleep queue and start our timeout
-	 * before calling thread_suspend_check, as we could stop there, and
-	 * a wakeup or a SIGCONT (or both) could occur while we were stopped.
-	 * without resuming us, thus we must be ready for sleep
-	 * when cursig is called.  If the wakeup happens while we're
-	 * stopped, td->td_wchan will be 0 upon return from cursig.
+	 * before calling thread_suspend_check, as we could stop there,
+	 * and a wakeup or a SIGCONT (or both) could occur while we were
+	 * stopped without resuming us.  Thus, we must be ready for sleep
+	 * when cursig() is called.  If the wakeup happens while we're
+	 * stopped, then td will no longer be on a sleep queue upon
+	 * return from cursig().
 	 */
+	sleepq_add(sq, ident, mtx, wmesg, 0);
+	if (timo)
+		sleepq_set_timeout(sq, ident, timo);
 	if (catch) {
-		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
-		    p->p_pid, p->p_comm);
-		td->td_flags |= TDF_SINTR;
-		mtx_unlock_spin(&sched_lock);
-		PROC_LOCK(p);
-		mtx_lock(&p->p_sigacts->ps_mtx);
-		sig = cursig(td);
-		mtx_unlock(&p->p_sigacts->ps_mtx);
-		if (sig == 0 && thread_suspend_check(1))
-			sig = SIGSTOP;
-		mtx_lock_spin(&sched_lock);
-		PROC_UNLOCK(p);
-		if (sig != 0) {
-			if (TD_ON_SLEEPQ(td))
-				unsleep(td);
-		} else if (!TD_ON_SLEEPQ(td))
+		sig = sleepq_catch_signals(ident);
+		if (sig == 0 && !TD_ON_SLEEPQ(td)) {
+			mtx_lock_spin(&sched_lock);
+			td->td_flags &= ~TDF_SINTR;
+			mtx_unlock_spin(&sched_lock);
 			catch = 0;
+		}
 	} else
 		sig = 0;
 
 	/*
-	 * Let the scheduler know we're about to voluntarily go to sleep.
+	 * Adjust this thread's priority.
+	 *
+	 * XXX: Do we need to save priority in td_base_pri?
 	 */
-	sched_sleep(td, priority & PRIMASK);
+	mtx_lock_spin(&sched_lock);
+	sched_prio(td, priority & PRIMASK);
+	mtx_unlock_spin(&sched_lock);
 
-	if (TD_ON_SLEEPQ(td)) {
-		TD_SET_SLEEPING(td);
-		mi_switch(SW_VOL);
+	if (timo && catch)
+		rval = sleepq_timedwait_sig(ident, sig != 0);
+	else if (timo)
+		rval = sleepq_timedwait(ident, sig != 0);
+	else if (catch)
+		rval = sleepq_wait_sig(ident);
+	else {
+		sleepq_wait(ident);
+		rval = 0;
 	}
+
 	/*
 	 * We're awake from voluntary sleep.
 	 */
-	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
-	    p->p_comm);
-	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
-	td->td_flags &= ~TDF_SINTR;
-	if (td->td_flags & TDF_TIMEOUT) {
-		td->td_flags &= ~TDF_TIMEOUT;
-		if (sig == 0)
-			rval = EWOULDBLOCK;
-	} else if (td->td_flags & TDF_TIMOFAIL) {
-		td->td_flags &= ~TDF_TIMOFAIL;
-	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
-		/*
-		 * This isn't supposed to be pretty.  If we are here, then
-		 * the endtsleep() callout is currently executing on another
-		 * CPU and is either spinning on the sched_lock or will be
-		 * soon.  If we don't synchronize here, there is a chance
-		 * that this process may msleep() again before the callout
-		 * has a chance to run and the callout may end up waking up
-		 * the wrong msleep().  Yuck.
-		 */
-		TD_SET_SLEEPING(td);
-		mi_switch(SW_INVOL);
-		td->td_flags &= ~TDF_TIMOFAIL;
-	} 
-	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
-	    (rval == 0)) {
-		rval = td->td_intrval;
-	}
-	mtx_unlock_spin(&sched_lock);
-	if (rval == 0 && catch) {
-		PROC_LOCK(p);
-		/* XXX: shouldn't we always be calling cursig()? */
-		mtx_lock(&p->p_sigacts->ps_mtx);
-		if (sig != 0 || (sig = cursig(td))) {
-			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
-				rval = EINTR;
-			else
-				rval = ERESTART;
-		}
-		mtx_unlock(&p->p_sigacts->ps_mtx);
-		PROC_UNLOCK(p);
-	}
+	if (rval == 0 && catch)
+		rval = sleepq_calc_signal_retval(sig);
 #ifdef KTRACE
 	if (KTRPOINT(td, KTR_CSW))
 		ktrcsw(0, 0);
@@ -311,109 +288,14 @@
 }
 
 /*
- * Implement timeout for msleep().
- *
- * If process hasn't been awakened (wchan non-zero),
- * set timeout flag and undo the sleep.  If proc
- * is stopped, just unsleep so it will remain stopped.
- * MP-safe, called without the Giant mutex.
- */
-static void
-endtsleep(arg)
-	void *arg;
-{
-	register struct thread *td;
-
-	td = (struct thread *)arg;
-	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
-	    td, td->td_proc->p_pid, td->td_proc->p_comm);
-	mtx_lock_spin(&sched_lock);
-	/*
-	 * This is the other half of the synchronization with msleep()
-	 * described above.  If the TDS_TIMEOUT flag is set, we lost the
-	 * race and just need to put the process back on the runqueue.

>>> TRUNCATED FOR MAIL (1000 lines) <<<



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200402271904.i1RJ4Yf3033493>