Date:      Tue, 29 Oct 2019 17:28:25 +0000 (UTC)
From:      Gleb Smirnoff <glebius@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r354148 - head/sys/kern
Message-ID:  <201910291728.x9THSPYL012449@repo.freebsd.org>

Author: glebius
Date: Tue Oct 29 17:28:25 2019
New Revision: 354148
URL: https://svnweb.freebsd.org/changeset/base/354148

Log:
  Merge td_epochnest with td_no_sleeping.
  
  Epoch itself doesn't rely on the counter; it is provided merely
  for sleeping subsystems to check.
  
  - In functions that sleep, use THREAD_CAN_SLEEP() to assert
    correctness.  When compiled with EPOCH_TRACE, also print the
    epoch info (see the sketch after this list).
  - _sleep() was the wrong place to put the epoch assertion; the
    right place is sleepq_add(), as there are ways to call the
    latter that bypass _sleep().
  - Do not increase td_no_sleeping in non-preemptible epochs.
    The critical section already triggers all possible safeguards,
    so a separate no-sleeping counter is extraneous there.
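
  A minimal user-space sketch of the counter pattern this change
  consolidates on: a single per-thread no-sleeping nesting counter that
  preemptible epoch sections bump on enter and drop on exit, and that
  any sleeping path checks before blocking.  All names below
  (mock_thread, thread_no_sleeping(), mock_sleep(), ...) are
  illustrative stand-ins, not the kernel's actual THREAD_*() macros or
  the epoch(9) API.

	/* Illustrative model only; not kernel code. */
	#include <assert.h>
	#include <stdio.h>

	struct mock_thread {
		int	td_no_sleeping;	/* nesting counter, 0 == may sleep */
	};

	static struct mock_thread mock_td;	/* stands in for curthread */

	static void	thread_no_sleeping(void) { mock_td.td_no_sleeping++; }
	static void	thread_sleeping_ok(void) { mock_td.td_no_sleeping--; }
	static int	thread_can_sleep(void) { return (mock_td.td_no_sleeping == 0); }

	/* A preemptible epoch section forbids sleeping for its whole extent. */
	static void	mock_epoch_enter_preempt(void) { thread_no_sleeping(); }
	static void	mock_epoch_exit_preempt(void)  { thread_sleeping_ok(); }

	/* Any path that may block checks the counter, as sleepq_add() now does. */
	static void
	mock_sleep(const char *wchan)
	{
		assert(thread_can_sleep() && "sleeping prohibited");
		printf("sleeping on %s\n", wchan);
	}

	int
	main(void)
	{
		mock_sleep("outside epoch");	/* counter is 0, allowed */

		mock_epoch_enter_preempt();
		printf("can sleep in epoch: %d\n", thread_can_sleep());
		/* mock_sleep() here would trip the assertion. */
		mock_epoch_exit_preempt();

		mock_sleep("after epoch exit");	/* allowed again */
		return (0);
	}

  Nested enters work the same way: each enter increments, each exit
  decrements, and sleeping is legal only once the counter is back to
  zero, which is what THREAD_CAN_SLEEP() reports.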
  
  Reviewed by:	kib

Modified:
  head/sys/kern/genoffset.c
  head/sys/kern/kern_malloc.c
  head/sys/kern/kern_synch.c
  head/sys/kern/subr_epoch.c
  head/sys/kern/subr_sleepqueue.c
  head/sys/kern/subr_trap.c

Modified: head/sys/kern/genoffset.c
==============================================================================
--- head/sys/kern/genoffset.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/genoffset.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -37,7 +37,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 
 OFFSYM(td_priority, thread, u_char);
-OFFSYM(td_epochnest, thread, u_char);
 OFFSYM(td_critnest, thread, u_int);
 OFFSYM(td_pinned, thread, int);
 OFFSYM(td_owepreempt, thread, u_char);

Modified: head/sys/kern/kern_malloc.c
==============================================================================
--- head/sys/kern/kern_malloc.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/kern_malloc.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -523,12 +523,13 @@ malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_
 	if (flags & M_WAITOK) {
 		KASSERT(curthread->td_intr_nesting_level == 0,
 		   ("malloc(M_WAITOK) in interrupt context"));
+		if (__predict_false(!THREAD_CAN_SLEEP())) {
 #ifdef EPOCH_TRACE
-		if (__predict_false(curthread->td_epochnest > 0))
 			epoch_trace_list(curthread);
 #endif
-		KASSERT(curthread->td_epochnest == 0,
-			("malloc(M_WAITOK) in epoch context"));		
+			KASSERT(0,
+			    ("malloc(M_WAITOK) with sleeping prohibited"));
+		}
 	}
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("malloc: called with spinlock or critical section held"));

Modified: head/sys/kern/kern_synch.c
==============================================================================
--- head/sys/kern/kern_synch.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/kern_synch.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -151,11 +151,6 @@ _sleep(void *ident, struct lock_object *lock, int prio
 	    ("sleeping without a lock"));
 	KASSERT(ident != NULL, ("_sleep: NULL ident"));
 	KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running"));
-#ifdef EPOCH_TRACE
-	if (__predict_false(curthread->td_epochnest > 0))
-		epoch_trace_list(curthread);
-#endif
-	KASSERT(td->td_epochnest == 0, ("sleeping in an epoch section"));
 	if (priority & PDROP)
 		KASSERT(lock != NULL && lock != &Giant.lock_object,
 		    ("PDROP requires a non-Giant lock"));

Modified: head/sys/kern/subr_epoch.c
==============================================================================
--- head/sys/kern/subr_epoch.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/subr_epoch.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -377,7 +377,7 @@ _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et
 	epoch_trace_enter(td, epoch, et, file, line);
 #endif
 	et->et_td = td;
-	td->td_epochnest++;
+	THREAD_NO_SLEEPING();
 	critical_enter();
 	sched_pin();
 	td->td_pre_epoch_prio = td->td_priority;
@@ -390,13 +390,10 @@ _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et
 void
 epoch_enter(epoch_t epoch)
 {
-	struct thread *td;
 	epoch_record_t er;
 
 	MPASS(cold || epoch != NULL);
 	INIT_CHECK(epoch);
-	td = curthread;
-	td->td_epochnest++;
 	critical_enter();
 	er = epoch_currecord(epoch);
 	ck_epoch_begin(&er->er_record, NULL);
@@ -412,8 +409,7 @@ _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et 
 	td = curthread;
 	critical_enter();
 	sched_unpin();
-	MPASS(td->td_epochnest);
-	td->td_epochnest--;
+	THREAD_SLEEPING_OK();
 	er = epoch_currecord(epoch);
 	MPASS(epoch->e_flags & EPOCH_PREEMPT);
 	MPASS(et != NULL);
@@ -435,13 +431,9 @@ _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et 
 void
 epoch_exit(epoch_t epoch)
 {
-	struct thread *td;
 	epoch_record_t er;
 
 	INIT_CHECK(epoch);
-	td = curthread;
-	MPASS(td->td_epochnest);
-	td->td_epochnest--;
 	er = epoch_currecord(epoch);
 	ck_epoch_end(&er->er_record, NULL);
 	critical_exit();
@@ -740,7 +732,7 @@ in_epoch_verbose(epoch_t epoch, int dump_onfail)
 	epoch_record_t er;
 
 	td = curthread;
-	if (td->td_epochnest == 0)
+	if (THREAD_CAN_SLEEP())
 		return (0);
 	if (__predict_false((epoch) == NULL))
 		return (0);

Modified: head/sys/kern/subr_sleepqueue.c
==============================================================================
--- head/sys/kern/subr_sleepqueue.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/subr_sleepqueue.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -80,6 +80,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/stack.h>
 #include <sys/sysctl.h>
 #include <sys/time.h>
+#ifdef EPOCH_TRACE
+#include <sys/epoch.h>
+#endif
 
 #include <machine/atomic.h>
 
@@ -315,9 +318,14 @@ sleepq_add(void *wchan, struct lock_object *lock, cons
 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
 
 	/* If this thread is not allowed to sleep, die a horrible death. */
-	KASSERT(THREAD_CAN_SLEEP(),
-	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
-	    __func__, td, wchan));
+	if (__predict_false(!THREAD_CAN_SLEEP())) {
+#ifdef EPOCH_TRACE
+		epoch_trace_list(curthread);
+#endif
+		KASSERT(0,
+		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
+		    __func__, td, wchan));
+	}
 
 	/* Look up the sleep queue associated with the wait channel 'wchan'. */
 	sq = sleepq_lookup(wchan);

Modified: head/sys/kern/subr_trap.c
==============================================================================
--- head/sys/kern/subr_trap.c	Tue Oct 29 17:19:36 2019	(r354147)
+++ head/sys/kern/subr_trap.c	Tue Oct 29 17:28:25 2019	(r354148)
@@ -166,12 +166,6 @@ userret(struct thread *td, struct trapframe *frame)
 	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
 	KASSERT(td->td_critnest == 0,
 	    ("userret: Returning in a critical section"));
-#ifdef EPOCH_TRACE
-	if (__predict_false(curthread->td_epochnest > 0))
-		epoch_trace_list(curthread);
-#endif
-	KASSERT(td->td_epochnest == 0,
-	    ("userret: Returning in an epoch section"));
 	KASSERT(td->td_locks == 0,
 	    ("userret: Returning with %d locks held", td->td_locks));
 	KASSERT(td->td_rw_rlocks == 0,
@@ -185,8 +179,12 @@ userret(struct thread *td, struct trapframe *frame)
 	    td->td_lk_slocks));
 	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
 	    ("userret: Returning with pagefaults disabled"));
-	KASSERT(THREAD_CAN_SLEEP(),
-	    ("userret: Returning with sleep disabled"));
+	if (__predict_false(!THREAD_CAN_SLEEP())) {
+#ifdef EPOCH_TRACE
+		epoch_trace_list(curthread);
+#endif
+		KASSERT(0, ("userret: Returning with sleep disabled"));
+	}
 	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
 	    ("userret: Returning with with pinned thread"));
 	KASSERT(td->td_vp_reserv == 0,


