Date:      Thu, 18 Feb 2021 14:04:01 GMT
From:      Alex Richardson <arichardson@FreeBSD.org>
To:        src-committers@FreeBSD.org, dev-commits-src-all@FreeBSD.org, dev-commits-src-main@FreeBSD.org
Subject:   git: fa2528ac6435 - main - Use atomic loads/stores when updating td->td_state
Message-ID:  <202102181404.11IE412g030923@gitrepo.freebsd.org>

The branch main has been updated by arichardson:

URL: https://cgit.FreeBSD.org/src/commit/?id=fa2528ac643519072c498b483d0dcc1fa5d99bc1

commit fa2528ac643519072c498b483d0dcc1fa5d99bc1
Author:     Alex Richardson <arichardson@FreeBSD.org>
AuthorDate: 2021-02-18 10:25:10 +0000
Commit:     Alex Richardson <arichardson@FreeBSD.org>
CommitDate: 2021-02-18 14:02:48 +0000

    Use atomic loads/stores when updating td->td_state
    
    KCSAN complains about racy accesses in the locking code. Those races are
    fine since they are inside a TD_SET_RUNNING() loop that expects the value
    to be changed by another CPU.
    
    Use relaxed atomic stores/loads to indicate that this variable can be
    written/read by multiple CPUs at the same time. This also prevents the
    compiler from performing unexpected reorderings.
    
    Reported by:    GENERIC-KCSAN
    Test Plan:      KCSAN no longer complains, kernel still runs fine.
    Reviewed By:    markj, mjg (earlier version)
    Differential Revision: https://reviews.freebsd.org/D28569
---
 lib/libkvm/kvm_proc.c            |  2 +-
 sys/compat/linprocfs/linprocfs.c |  2 +-
 sys/ddb/db_command.c             |  2 +-
 sys/ddb/db_ps.c                  | 12 ++++++------
 sys/gdb/gdb_main.c               |  6 +++---
 sys/kern/init_main.c             |  2 +-
 sys/kern/kern_intr.c             |  2 +-
 sys/kern/kern_prot.c             |  2 +-
 sys/kern/kern_racct.c            |  2 +-
 sys/kern/kern_synch.c            |  4 ++--
 sys/kern/kern_thread.c           |  8 ++++----
 sys/kern/sched_4bsd.c            |  2 +-
 sys/kern/subr_turnstile.c        |  6 +++---
 sys/sys/proc.h                   | 34 +++++++++++++++++++++++-----------
 sys/vm/vm_meter.c                |  2 +-
 15 files changed, 50 insertions(+), 38 deletions(-)
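
For context, a minimal userland sketch of the accessor-macro pattern this
commit introduces, written with C11 relaxed atomics standing in for the
kernel's atomic_load_int()/atomic_store_int(); the struct, enum, and macro
names below are illustrative only, not the kernel's (those are TD_GET_STATE()
and TD_SET_STATE() in sys/sys/proc.h, shown in the diff further down):

/*
 * Illustrative sketch, not kernel code: wrap every access to a field that
 * several CPUs may touch concurrently in relaxed atomic load/store macros.
 */
#include <stdatomic.h>
#include <stdio.h>

enum fake_state { STATE_INACTIVE, STATE_CAN_RUN, STATE_RUNQ, STATE_RUNNING };

struct fake_thread {
	_Atomic int	state;	/* may be read/written by multiple CPUs */
};

/* Relaxed load/store: the access itself is atomic, no extra ordering. */
#define	THREAD_GET_STATE(t)						\
	atomic_load_explicit(&(t)->state, memory_order_relaxed)
#define	THREAD_SET_STATE(t, s)						\
	atomic_store_explicit(&(t)->state, (s), memory_order_relaxed)

int
main(void)
{
	struct fake_thread t;

	THREAD_SET_STATE(&t, STATE_RUNNING);
	if (THREAD_GET_STATE(&t) == STATE_RUNNING)
		printf("running\n");
	return (0);
}

Relaxed ordering only makes the individual access atomic; it imposes no
ordering on surrounding memory operations, which is sufficient here because,
per the commit message, the races KCSAN flagged are benign by design.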

diff --git a/lib/libkvm/kvm_proc.c b/lib/libkvm/kvm_proc.c
index 63f7c2a8a824..eed2f3de6075 100644
--- a/lib/libkvm/kvm_proc.c
+++ b/lib/libkvm/kvm_proc.c
@@ -426,7 +426,7 @@ nopgrp:
 					    TD_CAN_RUN(&mtd) ||
 					    TD_IS_RUNNING(&mtd)) {
 						kp->ki_stat = SRUN;
-					} else if (mtd.td_state ==
+					} else if (TD_GET_STATE(&mtd) ==
 					    TDS_INHIBITED) {
 						if (P_SHOULDSTOP(&proc)) {
 							kp->ki_stat = SSTOP;
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index 79ffc4dfd5aa..fc2c29240893 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -1054,7 +1054,7 @@ linprocfs_doprocstatus(PFS_FILL_ARGS)
 				state = "X (exiting)";
 				break;
 			}
-			switch(td2->td_state) {
+			switch(TD_GET_STATE(td2)) {
 			case TDS_INHIBITED:
 				state = "S (sleeping)";
 				break;
diff --git a/sys/ddb/db_command.c b/sys/ddb/db_command.c
index 21ff75f78e6a..fedec1dd33a4 100644
--- a/sys/ddb/db_command.c
+++ b/sys/ddb/db_command.c
@@ -854,7 +854,7 @@ _db_stack_trace_all(bool active_only)
 	for (td = kdb_thr_first(); td != NULL; td = kdb_thr_next(td)) {
 		prev_jb = kdb_jmpbuf(jb);
 		if (setjmp(jb) == 0) {
-			if (td->td_state == TDS_RUNNING)
+			if (TD_IS_RUNNING(td))
 				db_printf("\nTracing command %s pid %d"
 				    " tid %ld td %p (CPU %d)\n",
 				    td->td_proc->p_comm, td->td_proc->p_pid,
diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 44b803299ee9..a5245528ca83 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -173,9 +173,9 @@ db_ps_proc(struct proc *p)
 			 */
 			rflag = sflag = dflag = lflag = wflag = 0;
 			FOREACH_THREAD_IN_PROC(p, td) {
-				if (td->td_state == TDS_RUNNING ||
-				    td->td_state == TDS_RUNQ ||
-				    td->td_state == TDS_CAN_RUN)
+				if (TD_GET_STATE(td) == TDS_RUNNING ||
+				    TD_GET_STATE(td) == TDS_RUNQ ||
+				    TD_GET_STATE(td) == TDS_CAN_RUN)
 					rflag++;
 				if (TD_ON_LOCK(td))
 					lflag++;
@@ -267,7 +267,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td, int all)
 
 	if (all) {
 		db_printf("%6d                  ", td->td_tid);
-		switch (td->td_state) {
+		switch (TD_GET_STATE(td)) {
 		case TDS_RUNNING:
 			snprintf(state, sizeof(state), "Run");
 			break;
@@ -367,7 +367,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
 	db_printf(" flags: %#x ", td->td_flags);
 	db_printf(" pflags: %#x\n", td->td_pflags);
 	db_printf(" state: ");
-	switch (td->td_state) {
+	switch (TD_GET_STATE(td)) {
 	case TDS_INACTIVE:
 		db_printf("INACTIVE\n");
 		break;
@@ -413,7 +413,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
 		db_printf("}\n");
 		break;
 	default:
-		db_printf("??? (%#x)\n", td->td_state);
+		db_printf("??? (%#x)\n", TD_GET_STATE(td));
 		break;
 	}
 	if (TD_ON_LOCK(td))
diff --git a/sys/gdb/gdb_main.c b/sys/gdb/gdb_main.c
index 6e0c9f21f947..a9e935ebfbb5 100644
--- a/sys/gdb/gdb_main.c
+++ b/sys/gdb/gdb_main.c
@@ -510,11 +510,11 @@ do_qXfer_threads_read(void)
 
 			sbuf_putc(&ctx.qXfer.sb, '>');
 
-			if (ctx.iter->td_state == TDS_RUNNING)
+			if (TD_GET_STATE(ctx.iter) == TDS_RUNNING)
 				sbuf_cat(&ctx.qXfer.sb, "Running");
-			else if (ctx.iter->td_state == TDS_RUNQ)
+			else if (TD_GET_STATE(ctx.iter) == TDS_RUNQ)
 				sbuf_cat(&ctx.qXfer.sb, "RunQ");
-			else if (ctx.iter->td_state == TDS_CAN_RUN)
+			else if (TD_GET_STATE(ctx.iter) == TDS_CAN_RUN)
 				sbuf_cat(&ctx.qXfer.sb, "CanRun");
 			else if (TD_ON_LOCK(ctx.iter))
 				sbuf_cat(&ctx.qXfer.sb, "Blocked");
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 5eb8186c23ca..2d6a4f636240 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -499,7 +499,7 @@ proc0_init(void *dummy __unused)
 	p->p_nice = NZERO;
 	td->td_tid = THREAD0_TID;
 	tidhash_add(td);
-	td->td_state = TDS_RUNNING;
+	TD_SET_STATE(td, TDS_RUNNING);
 	td->td_pri_class = PRI_TIMESHARE;
 	td->td_user_pri = PUSER;
 	td->td_base_user_pri = PUSER;
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index 0e11af2123e2..af7c52c6b176 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -992,7 +992,7 @@ intr_event_schedule_thread(struct intr_event *ie)
 		sched_add(td, SRQ_INTR);
 	} else {
 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
-		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
+		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
 		thread_unlock(td);
 	}
 
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index 170e9598835e..a107c7cced95 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1955,7 +1955,7 @@ credbatch_add(struct credbatch *crb, struct thread *td)
 
 	MPASS(td->td_realucred != NULL);
 	MPASS(td->td_realucred == td->td_ucred);
-	MPASS(td->td_state == TDS_INACTIVE);
+	MPASS(TD_GET_STATE(td) == TDS_INACTIVE);
 	cr = td->td_realucred;
 	KASSERT(cr->cr_users > 0, ("%s: users %d not > 0 on cred %p",
 	    __func__, cr->cr_users, cr));
diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c
index 4df1c72d50f7..7d179fe69844 100644
--- a/sys/kern/kern_racct.c
+++ b/sys/kern/kern_racct.c
@@ -1147,7 +1147,7 @@ racct_proc_throttle(struct proc *p, int timeout)
 		thread_lock(td);
 		td->td_flags |= TDF_ASTPENDING;
 
-		switch (td->td_state) {
+		switch (TD_GET_STATE(td)) {
 		case TDS_RUNQ:
 			/*
 			 * If the thread is on the scheduler run-queue, we can
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 4c0491ab6e85..dcca67326264 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -570,7 +570,7 @@ setrunnable(struct thread *td, int srqflags)
 	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
 
 	swapin = 0;
-	switch (td->td_state) {
+	switch (TD_GET_STATE(td)) {
 	case TDS_RUNNING:
 	case TDS_RUNQ:
 		break;
@@ -593,7 +593,7 @@ setrunnable(struct thread *td, int srqflags)
 		}
 		break;
 	default:
-		panic("setrunnable: state 0x%x", td->td_state);
+		panic("setrunnable: state 0x%x", TD_GET_STATE(td));
 	}
 	if ((srqflags & (SRQ_HOLD | SRQ_HOLDTD)) == 0)
 		thread_unlock(td);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 3561895d9fff..ea569576e7c9 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -346,7 +346,7 @@ thread_ctor(void *mem, int size, void *arg, int flags)
 	struct thread	*td;
 
 	td = (struct thread *)mem;
-	td->td_state = TDS_INACTIVE;
+	TD_SET_STATE(td, TDS_INACTIVE);
 	td->td_lastcpu = td->td_oncpu = NOCPU;
 
 	/*
@@ -379,7 +379,7 @@ thread_dtor(void *mem, int size, void *arg)
 
 #ifdef INVARIANTS
 	/* Verify that this thread is in a safe state to free. */
-	switch (td->td_state) {
+	switch (TD_GET_STATE(td)) {
 	case TDS_INHIBITED:
 	case TDS_RUNNING:
 	case TDS_CAN_RUN:
@@ -944,7 +944,7 @@ thread_exit(void)
 	rucollect(&p->p_ru, &td->td_ru);
 	PROC_STATUNLOCK(p);
 
-	td->td_state = TDS_INACTIVE;
+	TD_SET_STATE(td, TDS_INACTIVE);
 #ifdef WITNESS
 	witness_thread_exit(td);
 #endif
@@ -993,7 +993,7 @@ thread_link(struct thread *td, struct proc *p)
 	 * its lock has been created.
 	 * PROC_LOCK_ASSERT(p, MA_OWNED);
 	 */
-	td->td_state    = TDS_INACTIVE;
+	TD_SET_STATE(td, TDS_INACTIVE);
 	td->td_proc     = p;
 	td->td_flags    = TDF_INMEM;
 
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7b44e12ef330..7ce9bd81704f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1764,7 +1764,7 @@ sched_affinity(struct thread *td)
 	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
 		return;
 
-	switch (td->td_state) {
+	switch (TD_GET_STATE(td)) {
 	case TDS_RUNQ:
 		/*
 		 * If we are on a per-CPU runqueue that is in the set,
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 2c8f2909f2b3..d966a796c867 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -288,7 +288,7 @@ propagate_priority(struct thread *td)
 		 */
 		KASSERT(TD_ON_LOCK(td), (
 		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
-		    td->td_tid, td->td_name, td->td_state,
+		    td->td_tid, td->td_name, TD_GET_STATE(td),
 		    ts->ts_lockobj->lo_name));
 
 		/*
@@ -1185,7 +1185,7 @@ print_lockchain(struct thread *td, const char *prefix)
 		}
 		db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
 		    td->td_proc->p_pid, td->td_name);
-		switch (td->td_state) {
+		switch (TD_GET_STATE(td)) {
 		case TDS_INACTIVE:
 			db_printf("inactive\n");
 			return;
@@ -1224,7 +1224,7 @@ print_lockchain(struct thread *td, const char *prefix)
 			db_printf("inhibited: %s\n", KTDSTATE(td));
 			return;
 		default:
-			db_printf("??? (%#x)\n", td->td_state);
+			db_printf("??? (%#x)\n", TD_GET_STATE(td));
 			return;
 		}
 	}
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 99257878c2e0..0073fee2a42e 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -347,6 +347,7 @@ struct thread {
 		TDS_RUNQ,
 		TDS_RUNNING
 	} td_state;			/* (t) thread state */
+	/* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
 	union {
 		register_t	tdu_retval[2];
 		off_t		tdu_off;
@@ -540,10 +541,15 @@ do {									\
 #define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
 #define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
 #define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
-#define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
-#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
-#define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
-#define	TD_IS_INHIBITED(td)	((td)->td_state == TDS_INHIBITED)
+#ifdef _KERNEL
+#define	TD_GET_STATE(td)	atomic_load_int(&(td)->td_state)
+#else
+#define	TD_GET_STATE(td)	((td)->td_state)
+#endif
+#define	TD_IS_RUNNING(td)	(TD_GET_STATE(td) == TDS_RUNNING)
+#define	TD_ON_RUNQ(td)		(TD_GET_STATE(td) == TDS_RUNQ)
+#define	TD_CAN_RUN(td)		(TD_GET_STATE(td) == TDS_CAN_RUN)
+#define	TD_IS_INHIBITED(td)	(TD_GET_STATE(td) == TDS_INHIBITED)
 #define	TD_ON_UPILOCK(td)	((td)->td_flags & TDF_UPIBLOCKED)
 #define TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)
 
@@ -557,15 +563,15 @@ do {									\
 	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
 	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
 
-#define	TD_SET_INHIB(td, inhib) do {			\
-	(td)->td_state = TDS_INHIBITED;			\
-	(td)->td_inhibitors |= (inhib);			\
+#define	TD_SET_INHIB(td, inhib) do {		\
+	TD_SET_STATE(td, TDS_INHIBITED);	\
+	(td)->td_inhibitors |= (inhib);		\
 } while (0)
 
 #define	TD_CLR_INHIB(td, inhib) do {			\
 	if (((td)->td_inhibitors & (inhib)) &&		\
 	    (((td)->td_inhibitors &= ~(inhib)) == 0))	\
-		(td)->td_state = TDS_CAN_RUN;		\
+		TD_SET_STATE(td, TDS_CAN_RUN);		\
 } while (0)
 
 #define	TD_SET_SLEEPING(td)	TD_SET_INHIB((td), TDI_SLEEPING)
@@ -581,9 +587,15 @@ do {									\
 #define	TD_CLR_SUSPENDED(td)	TD_CLR_INHIB((td), TDI_SUSPENDED)
 #define	TD_CLR_IWAIT(td)	TD_CLR_INHIB((td), TDI_IWAIT)
 
-#define	TD_SET_RUNNING(td)	(td)->td_state = TDS_RUNNING
-#define	TD_SET_RUNQ(td)		(td)->td_state = TDS_RUNQ
-#define	TD_SET_CAN_RUN(td)	(td)->td_state = TDS_CAN_RUN
+#ifdef _KERNEL
+#define	TD_SET_STATE(td, state)	atomic_store_int(&(td)->td_state, state)
+#else
+#define	TD_SET_STATE(td, state)	(td)->td_state = state
+#endif
+#define	TD_SET_RUNNING(td)	TD_SET_STATE(td, TDS_RUNNING)
+#define	TD_SET_RUNQ(td)		TD_SET_STATE(td, TDS_RUNQ)
+#define	TD_SET_CAN_RUN(td)	TD_SET_STATE(td, TDS_CAN_RUN)
+
 
 #define	TD_SBDRY_INTR(td) \
     (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0)
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 44d3ac999c5a..f9e2b0c66cc8 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -207,7 +207,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		if (p->p_state != PRS_NEW) {
 			FOREACH_THREAD_IN_PROC(p, td) {
 				thread_lock(td);
-				switch (td->td_state) {
+				switch (TD_GET_STATE(td)) {
 				case TDS_INHIBITED:
 					if (TD_IS_SWAPPED(td))
 						total.t_sw++;


