Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 25 Jun 2002 00:31:09 -0700 (PDT)
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 13410 for review
Message-ID:  <200206250731.g5P7V9N27321@freefall.freebsd.org>

next in thread | raw e-mail | index | archive | help
http://people.freebsd.org/~peter/p4db/chv.cgi?CH=13410

Change 13410 by julian@julian_ref on 2002/06/25 00:30:56

	Check in "bunch-o-debuggin".
	Still looking for the mysterious runqueue corruption.
	I can duplicate it at least.

Affected files ...

... //depot/projects/kse/sys/kern/kern_mutex.c#29 edit
... //depot/projects/kse/sys/kern/kern_proc.c#74 edit
... //depot/projects/kse/sys/kern/kern_switch.c#60 edit
... //depot/projects/kse/sys/kern/kern_synch.c#73 edit
... //depot/projects/kse/sys/kern/kern_thread.c#79 edit
... //depot/projects/kse/sys/posix4/ksched.c#10 edit
... //depot/projects/kse/sys/sys/proc.h#115 edit
... //depot/projects/kse/sys/vm/vm_glue.c#33 edit

Differences ...

==== //depot/projects/kse/sys/kern/kern_mutex.c#29 (text+ko) ====

@@ -127,16 +127,14 @@
 		if (td->td_priority <= pri) /* lower is higher priority */
 			return;
 
-		/*
-		 * Bump this thread's priority.
-		 */
-		td->td_priority = pri;
 
 		/*
 		 * If lock holder is actually running, just bump priority.
 		 */
-		if (td->td_state == TDS_RUNNING)
+		if (td->td_state == TDS_RUNNING) {
+			td->td_priority = pri;
 			return;
+		}
 
 #ifndef SMP
 		/*
@@ -155,9 +153,14 @@
 		if (td->td_state == TDS_RUNQ) {
 			MPASS(td->td_blocked == NULL);
 			remrunqueue(td);
+			td->td_priority = pri;
 			setrunqueue(td);
 			return;
 		}
+		/*
+		 * Adjust for any other cases.
+		 */
+		td->td_priority = pri;
 
 		/*
 		 * If we aren't blocked on a mutex, we should be.

==== //depot/projects/kse/sys/kern/kern_proc.c#74 (text+ko) ====

@@ -255,13 +255,11 @@
 	mtx_lock_spin(&sched_lock);
 	mi_switch();	/* Save current registers to PCB. */
 	mtx_unlock_spin(&sched_lock);
-	PROC_LOCK(p);
 	newkse->ke_upcall = mbx.kmbx_upcall;
 	newkse->ke_stackbase  = mbx.kmbx_stackbase;
 	newkse->ke_stacksize = mbx.kmbx_stacksize;
 	newkse->ke_mailbox = uap->mbx;
 	cpu_save_upcall(td, newkse);
-	PROC_UNLOCK(p);
 	/* Note that we are the returning syscall */
 	td->td_retval[0] = 0;
 	td->td_retval[1] = 0;

==== //depot/projects/kse/sys/kern/kern_switch.c#60 (text+ko) ====

@@ -131,11 +131,11 @@
 		if (td->td_flags & TDF_UNBOUND) {
 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
 			if (kg->kg_last_assigned == td) 
-				if (TAILQ_PREV(td, threadlist_head, td_runq)
+				if (TAILQ_PREV(td, threadqueue, td_runq)
 				    != NULL)
 					printf("Yo MAMA!\n");
 				kg->kg_last_assigned = TAILQ_PREV(td,
-				    threadlist_head, td_runq);
+				    threadqueue, td_runq);
 			/*
 			 *  If we have started running an upcall,
 			 * Then TDF_UNBOUND WAS set because the thread was 
@@ -160,6 +160,7 @@
 		td->td_kse->ke_state = KES_UNQUEUED; 
 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
 	}
+	thread_sanity_check(td);
 	return (td);
 }
 
@@ -192,6 +193,7 @@
 	 * If we found one assign it the kse, otherwise idle the kse.
 	 */
 	if (td) {
+		thread_sanity_check(td);
 		kg->kg_last_assigned = td;
 		td->td_kse = ke;
 		ke->ke_thread = td;
@@ -209,37 +211,6 @@
 	}
 }
 
-#if 0
-/*
- * Given a current kse, and a current thread, see if
- * there is another thread of the same priority and
- * group, waiting to run, and if so, link it up as if it were ours and
- * return. Refuse to change if bound together.
- * ****not completed****
- */
-struct thread *
-nextthread(struct kse *ke)
-{
-	struct ksegrp *kg = ke->ke_ksegrp;
-	struct thread *td = ke->ke_thread;;
-
-	if (((td->td_flags & TDF_UNBOUND) == 0)
-	    && (td = TAILQ_FIRST(&kg->kg_runq))) {
-		if ((td->td_priority / RQ_PPQ) <= ke->ke_rqindex) {
-			/* head thread's priority is more urgent than ours */
-			if (td->td_kse) {
-				kse_reassign(ke);
-			} else {
-				ke->ke_thread = td;
-				td->td_kse = ke;
-				kg->kg_last_assigned = td;
-			}
-		}
-	}
-	return (td);
-}
-#endif
-
 int
 kserunnable(void)
 {
@@ -260,6 +231,7 @@
 	struct kse *ke;
 
 	mtx_assert(&sched_lock, MA_OWNED);
+	thread_sanity_check(td);
 	KASSERT ((td->td_state == TDS_RUNQ),
 		("remrunqueue: Bad state on run queue"));
 	kg = td->td_ksegrp;
@@ -284,8 +256,9 @@
 		 * KSE to the next available thread. Then, we should
 		 * see if we need to move the KSE in the run queues.
 		 */
+		td2 = kg->kg_last_assigned;
+		KASSERT((td2 != NULL), ("last assigned has wrong value "));
 		td->td_kse = NULL;
-		td2 = kg->kg_last_assigned;
 		if ((td3 = TAILQ_NEXT(td2, td_runq))) {
 			KASSERT(td3 != td, ("td3 somehow matched td"));
 			/*
@@ -306,7 +279,7 @@
 			 */
 			if (td == td2) {
 				kg->kg_last_assigned =
-				    TAILQ_PREV(td, threadlist_head, td_runq);
+				    TAILQ_PREV(td, threadqueue, td_runq);
 			}
 			runq_remove(&runq, ke);
 KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
@@ -321,6 +294,7 @@
 		}
 	}
 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
+	thread_sanity_check(td);
 }
 
 #if 1 /* use the first version */
@@ -334,6 +308,7 @@
 
 	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
 	mtx_assert(&sched_lock, MA_OWNED);
+	thread_sanity_check(td);
 	KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
 	td->td_state = TDS_RUNQ;
 	kg = td->td_ksegrp;
@@ -379,7 +354,7 @@
 			 */
 			td2 = kg->kg_last_assigned;
 			kg->kg_last_assigned =
-		    		TAILQ_PREV(td2, threadlist_head, td_runq);
+		    		TAILQ_PREV(td2, threadqueue, td_runq);
 			ke = td2->td_kse;
 			runq_remove(&runq, ke);
 			ke->ke_thread = td;
@@ -413,11 +388,12 @@
 	 */
 	if (ke) {
 		if (kg->kg_last_assigned ==
-		    TAILQ_PREV(td, threadlist_head, td_runq)) {
+		    TAILQ_PREV(td, threadqueue, td_runq)) {
 			kg->kg_last_assigned = td;
 		}
 		runq_add(&runq, ke);
 	}
+	thread_sanity_check(td);
 }
 
 #else
@@ -530,7 +506,7 @@
 		ke = td2->td_kse;
 KASSERT((ke->ke_kgrlist.tqe_next != ke), ("linked to self!"));
 		kg->kg_last_assigned =
-		    TAILQ_PREV(td2, threadlist_head, td_runq);
+		    TAILQ_PREV(td2, threadqueue, td_runq);
 		td2->td_kse = NULL;
 		td->td_kse = ke;
 		ke->ke_thread = td;
@@ -774,3 +750,91 @@
 		runq_add(rq, ke);
 	}
 }
+
+void
+thread_sanity_check(struct thread *td)
+{
+	struct proc *p;
+	struct ksegrp *kg;
+	struct kse *ke;
+	struct thread *td2;
+	unsigned int prevpri;
+	int	saw_lastassigned;
+	int unassigned;
+	int assigned;
+
+	p = td->td_proc;
+	kg = td->td_ksegrp;
+	ke = td->td_kse;
+
+	if (kg != &p->p_ksegrp) {
+		panic ("wrong ksegrp");
+	}
+
+	if (ke) {
+		if (ke != &p->p_kse) {
+			panic("wrong kse");
+		}
+		if (ke->ke_thread != td) {
+			panic("wrong thread");
+		}
+	}
+	
+	if ((p->p_flag & P_KSES) == 0) {
+		if (ke == NULL) {
+			panic("non KSE thread lost kse");
+		}
+	} else {
+		prevpri = 0;
+		saw_lastassigned = 0;
+		unassigned = 0;
+		assigned = 0;
+		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
+			if (td2->td_priority < prevpri) {
+				panic("thread runqueue unosorted");
+			}
+			prevpri = td2->td_priority;
+			if (td2->td_kse) {
+				assigned++;
+				if (unassigned) {
+					panic("unassigned before assigned");
+				}
+ 				if  (kg->kg_last_assigned == NULL) {
+					panic("lastassigned corrupt");
+				}
+				if (saw_lastassigned) {
+					panic("last assigned not last");
+				}
+				if (td2->td_kse->ke_thread != td2) {
+					panic("mismatched kse/thread");
+				}
+			} else {
+				unassigned++;
+			}
+			if (td2 == kg->kg_last_assigned) {
+				saw_lastassigned = 1;
+				if (td2->td_kse == NULL) {
+					panic("last assigned not assigned");
+				}
+			}
+		}
+		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
+			panic("where on earth does lastassigned point?");
+		}
+		FOREACH_THREAD_IN_GROUP(kg, td2) {
+			if (((td2->td_flags & TDF_UNBOUND) == 0) && 
+			    (td2->td_state == TDS_RUNQ)) {
+				assigned++;
+				if (td2->td_kse == NULL) {
+					panic ("BOUND thread with no KSE");
+				}
+			}
+		}
+#if 0
+		if ((unassigned + assigned) != kg->kg_runnable) {
+			panic("wrong number in runnable");
+		}
+#endif
+	}
+}
+

==== //depot/projects/kse/sys/kern/kern_synch.c#73 (text+ko) ====

@@ -345,7 +345,11 @@
 					    td->td_state == TDS_RUNQ) {
 						/* this could be optimised */
 						remrunqueue(td);
+						td->td_priority =
+						    kg->kg_user_pri;
 						setrunqueue(td);
+					} else {
+						td->td_priority = kg->kg_user_pri;
 					}
 				}
 			}

==== //depot/projects/kse/sys/kern/kern_thread.c#79 (text+ko) ====

@@ -76,7 +76,7 @@
 
 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
 
-tdlist_head_t zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
+struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
 struct mtx zombie_thread_lock;
 MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
     "zombie_thread_lock", MTX_SPIN);

==== //depot/projects/kse/sys/posix4/ksched.c#10 (text+ko) ====

@@ -185,8 +185,12 @@
 				if (td->td_state == TDS_RUNNING) {
 					td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 				} else if (td->td_state == TDS_RUNQ) {
-					remrunqueue(td);
-					setrunqueue(td);
+					if (td->td_priority > kg->kg_user_pri) {
+						remrunqueue(td);
+						td->td_priority =
+						    kg->kg_user_pri;
+						setrunqueue(td);
+					}
 				}
 			}
 			mtx_unlock_spin(&sched_lock);
@@ -214,9 +218,14 @@
 				if (td->td_state == TDS_RUNNING) {
 					td->td_kse->ke_flags |= KEF_NEEDRESCHED;
 				} else if (td->td_state == TDS_RUNQ) {
-					remrunqueue(td);
-					setrunqueue(td);
+					if (td->td_priority > kg->kg_user_pri) {
+						remrunqueue(td);
+						td->td_priority =
+						    kg->kg_user_pri;
+						setrunqueue(td);
+					}
 				}
+				
 			}
 			mtx_unlock_spin(&sched_lock);
 		}

==== //depot/projects/kse/sys/sys/proc.h#115 (text+ko) ====

@@ -186,7 +186,6 @@
  * cache of free threads.
  */
 struct thread;
-typedef	TAILQ_HEAD(threadlist_head, thread) tdlist_head_t;
 
 /* 
  * The second structure is the Kernel Schedulable Entity. (KSE)
@@ -857,6 +856,7 @@
 int	thread_suspend_check(int how);
 void	thread_unsuspend(struct proc *p);
 
+void	thread_sanity_check(struct thread *td);
 #endif	/* _KERNEL */
 
 #endif	/* !_SYS_PROC_H_ */

==== //depot/projects/kse/sys/vm/vm_glue.c#33 (text+ko) ====


To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe p4-projects" in the body of the message




Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200206250731.g5P7V9N27321>