Date:      Wed, 10 Aug 2016 17:11:12 +0000 (UTC)
From:      Hans Petter Selasky <hselasky@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r303926 - in projects/hps_head/sys: compat/linuxkpi/common/src ddb kern sys
Message-ID:  <201608101711.u7AHBC9q055280@repo.freebsd.org>

Author: hselasky
Date: Wed Aug 10 17:11:12 2016
New Revision: 303926
URL: https://svnweb.freebsd.org/changeset/base/303926

Log:
  MFC r303426:
  Rewrite subr_sleepqueue.c's use of callouts so that it does not depend on
  the specifics of the callout KPI.
  
  Diff reduced with ^head.
  
  Differential revision:	https://reviews.freebsd.org/D7137
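
For reference, the reworked timeout handling condenses to roughly the sketch
below.  It is not a drop-in replacement: the helper names sleepq_arm_timeout()
and sleepq_timed_out() are illustrative only, and the usual kernel headers
(sys/param.h, sys/proc.h, sys/callout.h) are assumed.  The absolute deadline
is precomputed with callout_when() and remembered in td->td_sleeptimo, so the
sleeping thread decides "timed out or not" by comparing the saved deadline
against sbinuptime() instead of relying on callout_stop() having succeeded.

	/* Arming side: called with the sleepqueue chain lock held. */
	static void
	sleepq_arm_timeout(struct thread *td, sbintime_t sbt, sbintime_t pr,
	    int flags)
	{
		sbintime_t pr1;

		thread_lock(td);
		/* Turn the relative request into an absolute deadline. */
		callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
		thread_unlock(td);
		callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
		    sleepq_timeout, td, PCPU_GET(cpuid),
		    flags | C_PRECALC | C_DIRECT_EXEC);
	}

	/* Wakeup side: called with the thread lock held. */
	static int
	sleepq_timed_out(struct thread *td)
	{
		int res;

		res = 0;
		if (td->td_sleeptimo != 0) {
			if (td->td_sleeptimo <= sbinuptime())
				res = EWOULDBLOCK;
			td->td_sleeptimo = 0;	/* consume the deadline */
		}
		return (res);
	}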

Modified:
  projects/hps_head/sys/compat/linuxkpi/common/src/linux_compat.c
  projects/hps_head/sys/ddb/db_ps.c
  projects/hps_head/sys/kern/init_main.c
  projects/hps_head/sys/kern/kern_condvar.c
  projects/hps_head/sys/kern/kern_lock.c
  projects/hps_head/sys/kern/kern_synch.c
  projects/hps_head/sys/kern/kern_thread.c
  projects/hps_head/sys/kern/subr_sleepqueue.c
  projects/hps_head/sys/sys/proc.h

Modified: projects/hps_head/sys/compat/linuxkpi/common/src/linux_compat.c
==============================================================================
--- projects/hps_head/sys/compat/linuxkpi/common/src/linux_compat.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/compat/linuxkpi/common/src/linux_compat.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -1142,9 +1142,7 @@ linux_wait_for_timeout_common(struct com
 		if (c->done)
 			break;
 		sleepq_add(c, NULL, "completion", flags, 0);
-		sleepq_release(c);
 		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
-		sleepq_lock(c);
 		if (flags & SLEEPQ_INTERRUPTIBLE)
 			ret = sleepq_timedwait_sig(c, 0);
 		else
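
With the reworked KPI, sleepq_set_timeout() may be called while the sleepqueue
chain lock taken by sleepq_lock()/sleepq_add() is still held, so the body of
the wait loop in linux_wait_for_timeout_common() reduces to the plain sequence
below (sketch only; "c" is the completion used as the wait channel, as in the
surrounding function):

	sleepq_lock(c);
	sleepq_add(c, NULL, "completion", flags, 0);
	/* The chain lock stays held while the timeout is armed. */
	sleepq_set_timeout(c, linux_timer_jiffies_until(end));
	if (flags & SLEEPQ_INTERRUPTIBLE)
		ret = sleepq_timedwait_sig(c, 0);
	else
		ret = sleepq_timedwait(c, 0);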

Modified: projects/hps_head/sys/ddb/db_ps.c
==============================================================================
--- projects/hps_head/sys/ddb/db_ps.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/ddb/db_ps.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -375,8 +375,13 @@ DB_SHOW_COMMAND(thread, db_show_thread)
 		db_printf(" lock: %s  turnstile: %p\n", td->td_lockname,
 		    td->td_blocked);
 	if (TD_ON_SLEEPQ(td))
-		db_printf(" wmesg: %s  wchan: %p\n", td->td_wmesg,
-		    td->td_wchan);
+		db_printf(
+	    " wmesg: %s  wchan: %p sleeptimo %lx. %jx (curr %lx. %jx)\n",
+		    td->td_wmesg, td->td_wchan,
+		    (long)sbttobt(td->td_sleeptimo).sec,
+		    (uintmax_t)sbttobt(td->td_sleeptimo).frac,
+		    (long)sbttobt(sbinuptime()).sec,
+		    (uintmax_t)sbttobt(sbinuptime()).frac);
 	db_printf(" priority: %d\n", td->td_priority);
 	db_printf(" container lock: %s (%p)\n", lock->lo_name, lock);
 	if (td->td_swvoltick != 0) {
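
The extra output prints the sleep deadline and the current uptime split into
seconds and fraction.  sbintime_t is a 64-bit 32.32 fixed-point count of
seconds; sbttobt() converts it to a struct bintime for display, along these
lines (illustrative snippet, assuming the usual ddb/thread context):

	struct bintime bt;

	bt = sbttobt(td->td_sleeptimo);	/* 32.32 fixed point -> sec + frac */
	db_printf("sleeptimo %lx.%jx\n", (long)bt.sec, (uintmax_t)bt.frac);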

Modified: projects/hps_head/sys/kern/init_main.c
==============================================================================
--- projects/hps_head/sys/kern/init_main.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/init_main.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -514,8 +514,7 @@ proc0_init(void *dummy __unused)
 
 	callout_init_mtx(&p->p_itcallout, &p->p_mtx, 0);
 	callout_init_mtx(&p->p_limco, &p->p_mtx, 0);
-	mtx_init(&td->td_slpmutex, "td_slpmutex", NULL, MTX_SPIN);
-	callout_init_mtx(&td->td_slpcallout, &td->td_slpmutex, 0);
+	callout_init(&td->td_slpcallout, 1);
 
 	/* Create credentials. */
 	newcred = crget();

Modified: projects/hps_head/sys/kern/kern_condvar.c
==============================================================================
--- projects/hps_head/sys/kern/kern_condvar.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/kern_condvar.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -298,13 +298,15 @@ _cv_timedwait_sbt(struct cv *cvp, struct
 	DROP_GIANT();
 
 	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
-	sleepq_release(cvp);
 	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 	if (lock != &Giant.lock_object) {
+		if (class->lc_flags & LC_SLEEPABLE)
+			sleepq_release(cvp);
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
+		if (class->lc_flags & LC_SLEEPABLE)
+			sleepq_lock(cvp);
 	}
-	sleepq_lock(cvp);
 	rval = sleepq_timedwait(cvp, 0);
 
 #ifdef KTRACE
@@ -359,13 +361,15 @@ _cv_timedwait_sig_sbt(struct cv *cvp, st
 
 	sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
 	    SLEEPQ_INTERRUPTIBLE, 0);
-	sleepq_release(cvp);
 	sleepq_set_timeout_sbt(cvp, sbt, pr, flags);
 	if (lock != &Giant.lock_object) {
+		if (class->lc_flags & LC_SLEEPABLE)
+			sleepq_release(cvp);
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
+		if (class->lc_flags & LC_SLEEPABLE)
+			sleepq_lock(cvp);
 	}
-	sleepq_lock(cvp);
 	rval = sleepq_timedwait_sig(cvp, 0);
 
 #ifdef KTRACE
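
In both condvar hunks the sleepqueue chain lock (a spin mutex) now stays held
while sleepq_set_timeout_sbt() runs; it is dropped around class->lc_unlock()
only when the interlock is LC_SLEEPABLE, mirroring what _sleep() already does
for sleepable interlocks.  The caller-visible contract is unchanged; a typical
consumer still looks like this hedged sketch (sc, sc->mtx, sc->cv and
sc->ready are hypothetical):

	mtx_lock(&sc->mtx);
	while (!sc->ready) {
		/* Give up after one second; EWOULDBLOCK means we timed out. */
		if (cv_timedwait_sbt(&sc->cv, &sc->mtx, SBT_1S, 0, 0) ==
		    EWOULDBLOCK)
			break;
	}
	mtx_unlock(&sc->mtx);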

Modified: projects/hps_head/sys/kern/kern_lock.c
==============================================================================
--- projects/hps_head/sys/kern/kern_lock.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/kern_lock.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -206,11 +206,9 @@ sleeplk(struct lock *lk, u_int flags, st
 	GIANT_SAVE();
 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
-	if ((flags & LK_TIMELOCK) && timo) {
-		sleepq_release(&lk->lock_object);
+	if ((flags & LK_TIMELOCK) && timo)
 		sleepq_set_timeout(&lk->lock_object, timo);
-		sleepq_lock(&lk->lock_object);
-	}
+
 	/*
 	 * Decisional switch for real sleeping.
 	 */

Modified: projects/hps_head/sys/kern/kern_synch.c
==============================================================================
--- projects/hps_head/sys/kern/kern_synch.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/kern_synch.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -211,16 +211,12 @@ _sleep(void *ident, struct lock_object *
 	 * return from cursig().
 	 */
 	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
+	if (sbt != 0)
+		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
 		sleepq_release(ident);
 		WITNESS_SAVE(lock, lock_witness);
 		lock_state = class->lc_unlock(lock);
-		if (sbt != 0)
-			sleepq_set_timeout_sbt(ident, sbt, pr, flags);
-		sleepq_lock(ident);
-	} else if (sbt != 0) {
-		sleepq_release(ident);
-		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
 		sleepq_lock(ident);
 	}
 	if (sbt != 0 && catch)
@@ -276,11 +272,8 @@ msleep_spin_sbt(void *ident, struct mtx 
 	 * We put ourselves on the sleep queue and start our timeout.
 	 */
 	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
-	if (sbt != 0) {
-		sleepq_release(ident);
+	if (sbt != 0)
 		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
-		sleepq_lock(ident);
-	}
 
 	/*
 	 * Can't call ktrace with any spin locks held so it can lock the
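
Neither hunk changes the caller-visible behaviour of the sleep primitives: a
timed sleep still returns EWOULDBLOCK when the timeout fires first.  A minimal
(hypothetical) caller, for reference:

	mtx_lock(&sc->mtx);
	/* Wait up to 100 ms for the interrupt handler to update sc->state. */
	error = msleep(&sc->state, &sc->mtx, PRIBIO, "scwait", hz / 10);
	mtx_unlock(&sc->mtx);
	if (error == EWOULDBLOCK)
		printf("timed out waiting for state change\n");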

Modified: projects/hps_head/sys/kern/kern_thread.c
==============================================================================
--- projects/hps_head/sys/kern/kern_thread.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/kern_thread.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -154,9 +154,6 @@ thread_ctor(void *mem, int size, void *a
 	audit_thread_alloc(td);
 #endif
 	umtx_thread_alloc(td);
-
-	mtx_init(&td->td_slpmutex, "td_slpmutex", NULL, MTX_SPIN);
-	callout_init_mtx(&td->td_slpcallout, &td->td_slpmutex, 0);
 	return (0);
 }
 
@@ -170,10 +167,6 @@ thread_dtor(void *mem, int size, void *a
 
 	td = (struct thread *)mem;
 
-	/* make sure to drain any use of the "td->td_slpcallout" */
-	callout_drain(&td->td_slpcallout);
-	mtx_destroy(&td->td_slpmutex);
-
 #ifdef INVARIANTS
 	/* Verify that this thread is in a safe state to free. */
 	switch (td->td_state) {
@@ -325,7 +318,7 @@ thread_reap(void)
 
 	/*
 	 * Don't even bother to lock if none at this instant,
-	 * we really don't care about the next instant..
+	 * we really don't care about the next instant.
 	 */
 	if (!TAILQ_EMPTY(&zombie_threads)) {
 		mtx_lock_spin(&zombie_lock);
@@ -390,6 +383,7 @@ thread_free(struct thread *td)
 	if (td->td_kstack != 0)
 		vm_thread_dispose(td);
 	vm_domain_policy_cleanup(&td->td_vm_dom_policy);
+	callout_drain(&td->td_slpcallout);
 	uma_zfree(thread_zone, td);
 }
 
@@ -587,6 +581,7 @@ thread_wait(struct proc *p)
 	td->td_cpuset = NULL;
 	cpu_thread_clean(td);
 	thread_cow_free(td);
+	callout_drain(&td->td_slpcallout);
 	thread_reap();	/* check for zombie threads etc. */
 }
 
@@ -612,6 +607,7 @@ thread_link(struct thread *td, struct pr
 	LIST_INIT(&td->td_lprof[0]);
 	LIST_INIT(&td->td_lprof[1]);
 	sigqueue_init(&td->td_sigqueue, p);
+	callout_init(&td->td_slpcallout, 1);
 	TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
 	p->p_numthreads++;
 }
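
The sleep callout now follows the standard callout(9) lifetime rule:
callout_init(c, 1) before first use (the second argument marks the handler
MP-safe, i.e. it runs without Giant) and callout_drain() before the memory may
be reused or freed; per the hunks above, that pair now lives in thread_link()
and in thread_free()/thread_wait().  The same rule for a generic consumer
(struct softc and softc_timer_fn are hypothetical):

	struct softc {
		struct callout	timer;
		/* ... */
	};

	/* Attach path. */
	callout_init(&sc->timer, 1);		/* 1 == MPSAFE handler */
	callout_reset(&sc->timer, hz, softc_timer_fn, sc);

	/* Detach path. */
	callout_drain(&sc->timer);	/* wait for a running handler to finish */
	free(sc, M_DEVBUF);		/* only now may the memory be reused */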

Modified: projects/hps_head/sys/kern/subr_sleepqueue.c
==============================================================================
--- projects/hps_head/sys/kern/subr_sleepqueue.c	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/kern/subr_sleepqueue.c	Wed Aug 10 17:11:12 2016	(r303926)
@@ -155,8 +155,7 @@ static uma_zone_t sleepq_zone;
  */
 static int	sleepq_catch_signals(void *wchan, int pri);
 static int	sleepq_check_signals(void);
-static int	sleepq_check_timeout(struct thread *);
-static void	sleepq_stop_timeout(struct thread *);
+static int	sleepq_check_timeout(void);
 #ifdef INVARIANTS
 static void	sleepq_dtor(void *mem, int size, void *arg);
 #endif
@@ -377,16 +376,26 @@ void
 sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
     int flags)
 {
+	struct sleepqueue_chain *sc;
 	struct thread *td;
+	sbintime_t pr1;
 
 	td = curthread;
-
-	mtx_lock_spin(&td->td_slpmutex);
+	sc = SC_LOOKUP(wchan);
+	mtx_assert(&sc->sc_lock, MA_OWNED);
+	MPASS(TD_ON_SLEEPQ(td));
+	MPASS(td->td_sleepqueue == NULL);
+	MPASS(wchan != NULL);
 	if (cold)
 		panic("timed sleep before timers are working");
-	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
-	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
-	mtx_unlock_spin(&td->td_slpmutex);
+	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
+	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
+	thread_lock(td);
+	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
+	thread_unlock(td);
+	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
+	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
+	    C_DIRECT_EXEC);
 }
 
 /*
@@ -571,29 +580,39 @@ sleepq_switch(void *wchan, int pri)
  * Check to see if we timed out.
  */
 static int
-sleepq_check_timeout(struct thread *td)
+sleepq_check_timeout(void)
 {
+	struct thread *td;
+	int res;
+
+	td = curthread;
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
 	/*
-	 * If TDF_TIMEOUT is set, we timed out.
+	 * If TDF_TIMEOUT is set, we timed out.  But recheck
+	 * td_sleeptimo anyway.
 	 */
-	if (td->td_flags & TDF_TIMEOUT) {
-		td->td_flags &= ~TDF_TIMEOUT;
-		return (EWOULDBLOCK);
+	res = 0;
+	if (td->td_sleeptimo != 0) {
+		if (td->td_sleeptimo <= sbinuptime())
+			res = EWOULDBLOCK;
+		td->td_sleeptimo = 0;
 	}
-	return (0);
-}
-
-/*
- * Atomically stop the timeout by using a mutex.
- */
-static void
-sleepq_stop_timeout(struct thread *td)
-{
-	mtx_lock_spin(&td->td_slpmutex);
-	callout_stop(&td->td_slpcallout);
-	mtx_unlock_spin(&td->td_slpmutex);
+	if (td->td_flags & TDF_TIMEOUT)
+		td->td_flags &= ~TDF_TIMEOUT;
+	else
+		/*
+		 * We ignore the situation where timeout subsystem was
+		 * unable to stop our callout.  The struct thread is
+		 * type-stable, the callout will use the correct
+		 * memory when running.  The checks of the
+		 * td_sleeptimo value in this function and in
+		 * sleepq_timeout() ensure that the thread does not
+		 * get spurious wakeups, even if the callout was reset
+		 * or thread reused.
+		 */
+		callout_stop(&td->td_slpcallout);
+	return (res);
 }
 
 /*
@@ -666,11 +685,9 @@ sleepq_timedwait(void *wchan, int pri)
 	MPASS(!(td->td_flags & TDF_SINTR));
 	thread_lock(td);
 	sleepq_switch(wchan, pri);
-	rval = sleepq_check_timeout(td);
+	rval = sleepq_check_timeout();
 	thread_unlock(td);
 
-	sleepq_stop_timeout(td);
-
 	return (rval);
 }
 
@@ -681,18 +698,12 @@ sleepq_timedwait(void *wchan, int pri)
 int
 sleepq_timedwait_sig(void *wchan, int pri)
 {
-	struct thread *td;
 	int rcatch, rvalt, rvals;
 
-	td = curthread;
-
 	rcatch = sleepq_catch_signals(wchan, pri);
-	rvalt = sleepq_check_timeout(td);
+	rvalt = sleepq_check_timeout();
 	rvals = sleepq_check_signals();
-	thread_unlock(td);
-
-	sleepq_stop_timeout(td);
-
+	thread_unlock(curthread);
 	if (rcatch)
 		return (rcatch);
 	if (rvals)
@@ -898,49 +909,45 @@ sleepq_broadcast(void *wchan, int flags,
 static void
 sleepq_timeout(void *arg)
 {
-	struct thread *td = arg;
-	int wakeup_swapper = 0;
+	struct sleepqueue_chain *sc;
+	struct sleepqueue *sq;
+	struct thread *td;
+	void *wchan;
+	int wakeup_swapper;
 
+	td = arg;
+	wakeup_swapper = 0;
 	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
 	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
 
-	/* Handle the three cases which can happen */
-
 	thread_lock(td);
-	if (TD_ON_SLEEPQ(td)) {
-		if (TD_IS_SLEEPING(td)) {
-			struct sleepqueue_chain *sc;
-			struct sleepqueue *sq;
-			void *wchan;
 
-			/*
-			 * Case I - thread is asleep and needs to be
-			 * awoken:
-			 */
-			wchan = td->td_wchan;
-			sc = SC_LOOKUP(wchan);
-			THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
-			sq = sleepq_lookup(wchan);
-			MPASS(sq != NULL);
-			td->td_flags |= TDF_TIMEOUT;
-			wakeup_swapper = sleepq_resume_thread(sq, td, 0);
-		} else {
-			/*
-			 * Case II - cancel going to sleep by setting
-			 * the timeout flag because the target thread
-			 * is not asleep yet. It can be on another CPU
-			 * in between sleepq_add() and one of the
-			 * sleepq_*wait*() routines or it can be in
-			 * sleepq_catch_signals().
-			 */
-			td->td_flags |= TDF_TIMEOUT;
-		}
-	} else {
+	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
 		/*
-		 * Case III - thread is already woken up by a wakeup
-		 * call and should not timeout. Nothing to do!
+		 * The thread does not want a timeout (yet).
 		 */
+	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
+		/*
+		 * See if the thread is asleep and get the wait
+		 * channel if it is.
+		 */
+		wchan = td->td_wchan;
+		sc = SC_LOOKUP(wchan);
+		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
+		sq = sleepq_lookup(wchan);
+		MPASS(sq != NULL);
+		td->td_flags |= TDF_TIMEOUT;
+		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
+	} else if (TD_ON_SLEEPQ(td)) {
+		/*
+		 * If the thread is on the SLEEPQ but isn't sleeping
+		 * yet, it can either be on another CPU in between
+		 * sleepq_add() and one of the sleepq_*wait*()
+		 * routines or it can be in sleepq_catch_signals().
+		 */
+		td->td_flags |= TDF_TIMEOUT;
 	}
+
 	thread_unlock(td);
 	if (wakeup_swapper)
 		kick_proc0();

Modified: projects/hps_head/sys/sys/proc.h
==============================================================================
--- projects/hps_head/sys/sys/proc.h	Wed Aug 10 16:31:15 2016	(r303925)
+++ projects/hps_head/sys/sys/proc.h	Wed Aug 10 17:11:12 2016	(r303926)
@@ -282,6 +282,7 @@ struct thread {
 	int		td_no_sleeping;	/* (k) Sleeping disabled count. */
 	int		td_dom_rr_idx;	/* (k) RR Numa domain selection. */
 	void		*td_su;		/* (k) FFS SU private */
+	sbintime_t	td_sleeptimo;	/* (t) Sleep timeout. */
 #define	td_endzero td_sigmask
 
 /* Copied during fork1() or create_thread(). */
@@ -319,7 +320,6 @@ struct thread {
 #define td_retval	td_uretoff.tdu_retval
 	u_int		td_cowgen;	/* (k) Generation of COW pointers. */
 	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
-	struct mtx	td_slpmutex;	/* (h) Mutex for sleep callout */
 	struct trapframe *td_frame;	/* (k) */
 	struct vm_object *td_kstack_obj;/* (a) Kstack object. */
 	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */


