Date:      Tue, 6 Jul 2004 06:05:49 GMT
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 56588 for review
Message-ID:  <200407060605.i6665nGL007839@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=56588

Change 56588 by julian@julian_jules1 on 2004/07/06 06:05:28

	Cleaned up version of sched_4bsd.c that compiles
	but is untested. thr is now scope_process.

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_kse.c#17 edit
.. //depot/projects/nsched/sys/kern/kern_thr.c#9 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#22 edit
.. //depot/projects/nsched/sys/sys/proc.h#19 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_kse.c#17 (text+ko) ====


==== //depot/projects/nsched/sys/kern/kern_thr.c#9 (text+ko) ====

@@ -34,6 +34,7 @@
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
+#include <sys/smp.h>
 #include <sys/sysent.h>
 #include <sys/systm.h>
 #include <sys/sysproto.h>
@@ -43,6 +44,7 @@
 
 #include <machine/frame.h>
 
+extern int virtual_cpu;
 /*
  * Back end support functions.
  */
@@ -118,6 +120,7 @@
 	ucontext_t ctx;
 	long id;
 	int error;
+	int ncpus;
 
 	if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
 		return (error);
@@ -143,7 +146,14 @@
 	td0->td_proc = td->td_proc;
 	PROC_LOCK(td->td_proc);
 	td0->td_sigmask = td->td_sigmask;
-	td->td_proc->p_flag |= P_HADTHREADS;
+	/* First time through? */
+	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
+		ncpus = mp_ncpus;
+		if (virtual_cpu != 0)
+			ncpus = virtual_cpu;
+		sched_set_concurrancy(td->td_ksegrp, ncpus);
+		td->td_proc->p_flag |= P_HADTHREADS;
+	}
 	PROC_UNLOCK(td->td_proc);
 	td0->td_ucred = crhold(td->td_ucred);
 

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#22 (text+ko) ====

@@ -102,7 +102,6 @@
 #define td_rqindex	td_sched->std_rqindex
 #define td_state	td_sched->std_state
 #define td_cpticks	td_sched->std_cpticks
-#define td_runq		td_sched->std_runq
 
 
 /* flags kept in ke_flags */
@@ -120,12 +119,13 @@
 #define	kg_last_assigned 	kg_sched->skg_last_assigned
 #define	kg_runq_threads		kg_sched->skg_runq_threads
 #define	kg_avail_opennings	kg_sched->skg_avail_opennings
+#define	kg_concurrancy		kg_sched->skg_concurrancy
 
 
 /****************************************************************
  * function prototypes 
  */
-static void	recycle_slot(struct ksegrp *kg); /* was kse_reassign */
+static void	slot_fill(struct ksegrp *kg); /* was kse_reassign */
 
 #define KTR_4BSD	0x0
 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
@@ -145,7 +145,7 @@
 #define	NICE_WEIGHT		1	/* Priorities per nice level. */
 
 #define STD_RUNQ_PCPU(std)						\
-    ((std)->std_runq != 0 && (std)->std_runq != &runq)
+    (((std)->std_runq != 0) && ((std)->std_runq != &runq))
 
 /*
  * TD_CAN_MIGRATE macro returns true if the thread can migrate between
@@ -163,14 +163,13 @@
 static void	setup_runqs(void);
 static void	roundrobin(void *arg);
 static void	schedcpu(void);
-static void	schedcpu_thread(void);
+static void	schedcpu_kthread(void);
 static void	sched_setup(void *dummy);
 static void	maybe_resched(struct thread *td);
 static void	updatepri(struct ksegrp *kg);
 static void	resetpriority(struct ksegrp *kg);
 static void	sched_add(struct thread *td);
 static void	sched_rem(struct thread *td);
-static struct td_sched * sched_choose(void);
 static void	adjustrunqueue( struct thread *td, int newpri) ;
 
 
@@ -180,7 +179,7 @@
 
 static struct kproc_desc sched_kp = {
         "schedcpu",
-        schedcpu_thread,
+        schedcpu_kthread,
         NULL
 };
 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
@@ -252,11 +251,11 @@
 	struct	rqhead rq_queues[RQ_NQS];
 };
 
-static void	runq_add(struct runq *, struct thread *);
+static void	runq_add(struct runq *, struct td_sched *);
 static int	runq_check(struct runq *);
 static struct td_sched *runq_choose(struct runq *);
 static void	runq_init(struct runq *);
-static void	runq_remove(struct runq *, struct thread *);
+static void	runq_remove(struct runq *, struct td_sched *);
 
 #endif  /* end of Jake copyright file */
 /*
@@ -441,7 +440,7 @@
  * Recompute process priorities, every hz ticks.
  * MP-safe, called without the Giant mutex.
  * Called from:
- *  schedcpu_thread() which is a kthread that awakens once per second.
+ *  schedcpu_kthread() which is a kthread that awakens once per second.
  */
 /* ARGSUSED */
 static void
@@ -450,7 +449,7 @@
 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 	struct thread *td;
 	struct proc *p;
-	struct td_sched *ts;
+	struct td_sched *std;
 	struct ksegrp *kg;
 	int awake, realstathz;
 
@@ -468,8 +467,8 @@
 		p->p_swtime++;
 		FOREACH_KSEGRP_IN_PROC(p, kg) { 
 			awake = 0;
-			FOREACH_THREAD(kg, td) {
-				ts = td->td_sched;
+			FOREACH_THREAD_IN_GROUP(kg, td) {
+				std = td->td_sched;
 				/*
 				 * Increment sleep time (if sleeping).  We
 				 * ignore overflow, as above.
@@ -560,7 +559,7 @@
  *  This is a kthread that runs forever..
  */
 static void
-schedcpu_thread(void)
+schedcpu_kthread(void)
 {
 	int nowake;
 
@@ -723,12 +722,13 @@
 /*
  * Charge the child's scheduling cpu usage to the parent.
  *
- * XXXKSE assume only one thread & ksegrp keep estcpu in each ksegrp.
+ * XXXKSE assume only one thread & ksegrp. Keep estcpu in each ksegrp.
  * Charge it to the ksegrp that did the wait since process estcpu is sum of
  * all ksegrps, this is strictly as expected.  Assume that the child process
  * aggregated all the estcpu into the 'built-in' ksegrp.
  * Called from:
  *  exit()
+ * XXXKSE add a sched_wait() to transfer estcpu..?
  */
 void
 sched_exit(struct proc *parent, struct thread *td)
@@ -747,13 +747,12 @@
  * We should give the estcpu to someone, but WHO?
  * Only called on exit of last thread in the ksegrp.
  * Called from:
- *  thread_exit() (for threaded programs only)
+ *  thread_exit() (for threaded programs only when last thread exits)
  */
 void
 sched_exit_ksegrp(struct proc *parent, struct thread *td)
 {
 	/* free any resources we had on this ksegrp */
-	sched_set_concurrancy(td->td_ksegrp, 0);
 }
 
 /*
@@ -765,9 +764,9 @@
 sched_thread_exit(struct thread *td)
 {
 
-	if (td->td_proc->p_flag & P_SA)  {
+	if (td->td_proc->p_flag & P_HADTHREADS)  {
 		td->td_ksegrp->kg_avail_opennings++;
-		recycle_slot(td->td_ksegrp);
+		slot_fill(td->td_ksegrp);
 	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt--;
@@ -785,11 +784,14 @@
 sched_thr_exit(struct thread *td)
 {
 
+	if (td->td_proc->p_flag & P_HADTHREADS)  {
+		td->td_ksegrp->kg_avail_opennings++;
+		slot_fill(td->td_ksegrp);
+	}
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_tdcnt--;
 }
 
-
 /*
  * Allocate any resources the scheduler needs to set up
  * on a new process at fork() time. Set up our scheduler specific extensions.
@@ -811,7 +813,7 @@
 
 	newtd->td_ksegrp->kg_concurrancy = 1;
 	/* non threaded process. ignore thread fairness stuff */
-	newtd->td_ksegrp->kg_avail_opennings = 0; 
+	newtd->td_ksegrp->kg_avail_opennings = 1; 
 	/* Our child inherits our estimated cpu requirement */
 	newtd->td_ksegrp->kg_estcpu = td->td_ksegrp->kg_estcpu;
 }
@@ -935,19 +937,20 @@
 	 */
 	if (td == PCPU_GET(idlethread))
 		TD_SET_CAN_RUN(td);
-	else if (TD_IS_RUNNING(td)) {
-		/* Put us back on the run queue. */
-		if (p->p_flag & P_SA)
-			kg->kg_avail_opennings++;
-		setrunqueue(td);
-	} else if (p->p_flag & P_SA) {
-		/*
-		 * We will not be on the run queue. So we must be
-		 * sleeping or similar. As it's available,
-		 * another thread could use our 'slot'. 
-		 */
-		kg->kg_avail_opennings++;
-		recycle_slot(kg);
+	else {
+		td->td_ksegrp->kg_avail_opennings++;
+		if (TD_IS_RUNNING(td)) {
+			/* Put us back on the run queue. */
+			setrunqueue(td);
+		} else if (p->p_flag & P_HADTHREADS) {
+			/*
+			 * We will not be on the run queue. So we must be
+			 * sleeping or similar. As it's available,
+			 * another thread could use our 'slot'. 
+			 * (don't bother if there are no other threads)
+			 */
+			slot_fill(td->td_ksegrp);
+		}
 	}
 	if (newtd == NULL)
 		newtd = choosethread(flags);
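
The rewritten branch above gives the departing thread's slot back
unconditionally, then lets setrunqueue() reclaim it for a still-runnable
thread or slot_fill() pass it to a sibling. A toy model of that accounting
(types and names are illustrative, not the kernel's):

#include <stdio.h>

struct toy_kg {
	int avail_openings;	/* unclaimed slots */
	int waiting;		/* runnable threads without a slot */
};

/* stand-in for slot_fill(): hand spare slots to waiting threads */
static void
toy_slot_fill(struct toy_kg *kg)
{
	while (kg->avail_openings > 0 && kg->waiting > 0) {
		kg->avail_openings--;
		kg->waiting--;
	}
}

/* stand-in for the switch-out path above */
static void
toy_switch_out(struct toy_kg *kg, int still_runnable)
{
	kg->avail_openings++;		/* give our slot back first */
	if (still_runnable)
		kg->avail_openings--;	/* re-queueing takes it straight back */
	else
		toy_slot_fill(kg);	/* a sleeper's slot goes to a sibling */
}

int
main(void)
{
	struct toy_kg kg = { 0, 1 };

	toy_switch_out(&kg, 0);		/* we sleep; the waiter gets our slot */
	printf("openings %d, waiting %d\n", kg.avail_openings, kg.waiting);
	return (0);
}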
@@ -985,7 +988,7 @@
  * the fact that the thread is becoming runnable,
  * and decide which run queue to use.
  * Called from:
- *  recycle_slot()  (local)
+ *  slot_fill()  (local)
  *  adjustrunqueue()  (local)
  *  setrunqueue()  (local)
  */
@@ -998,8 +1001,8 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 	KASSERT(std->std_state != STDS_ONRUNQ,
 	    ("sched_add: kse %p (%s) already in run queue", std,
-	    std->std_proc->p_comm));
-	KASSERT(std->std_proc->p_sflag & PS_INMEM,
+	    td->td_proc->p_comm));
+	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_add: process swapped out"));
 
 #ifdef SMP
@@ -1044,7 +1047,7 @@
 	struct td_sched *std;
 
 	std = td->td_sched;
-	KASSERT(std->std_proc->p_sflag & PS_INMEM,
+	KASSERT(td->td_proc->p_sflag & PS_INMEM,
 	    ("sched_rem: process swapped out"));
 	KASSERT((std->std_state == STDS_ONRUNQ),
 	    ("sched_rem: KSE not on run queue"));
@@ -1055,7 +1058,7 @@
 	runq_remove(std->std_runq, std);
 
 	std->std_state = STDS_THREAD;
-	std->std_ksegrp->kg_runq_threads--;
+	td->td_ksegrp->kg_runq_threads--;
 }
 
 /*
@@ -1205,9 +1208,9 @@
 	/*
 	 * and set it up as if running
 	 */
-	std->std_oncpu    = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
+	td_sched0.std_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
 	td_sched0.std_state = STDS_THREAD;
-	kg_sched0.concurrancy = 1;
+	kg_sched0.skg_concurrancy = 1;
 	kg_sched0.skg_avail_opennings = 0; /* we are already running */
 
 }
@@ -1221,6 +1224,8 @@
 int
 sched_thr_newthread(struct thread *td, struct thread *newtd, int flags)
 {
+	struct td_sched *newstd;
+
 	newstd = newtd->td_sched;
 	bzero(&newstd->std_startzero,
 	    RANGEOF(struct td_sched, std_startzero, std_endzero));
@@ -1230,7 +1235,7 @@
 
 	thread_link(newtd, td->td_ksegrp);
 
-	std->std_oncpu    = NOCPU;
+	newstd->std_oncpu = NOCPU;
 	newstd->std_state = STDS_THREAD;
 	newstd->std_cpticks = 0;
 
@@ -1300,7 +1305,6 @@
 sched_destroyproc(struct proc *p)
 {
 	struct ksegrp *kg;
-	struct kg_sched *skg;
 	
 	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
 	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
@@ -1333,16 +1337,17 @@
 	if (concurrancy > skg->skg_concurrancy) {
 		skg->skg_avail_opennings += concurrancy - skg->skg_concurrancy;
 		skg->skg_concurrancy = concurrancy;
-		recycle_slot(kg);
+		slot_fill(kg);
 	} else {
 		/*
 		 * don't remove things from the queue.
 		 * they'll just get used up as they run.
+		 * XXXKSE Make this a true statement..
 		 */
 		skg->skg_avail_opennings += concurrancy - skg->skg_concurrancy;
 		skg->skg_concurrancy = concurrancy;
 		if (skg->skg_avail_opennings < 0)
-			skg->skg_avail_opennings == 0;
+			skg->skg_avail_opennings = 0;
 	}
 	mtx_unlock_spin(&sched_lock);
 }
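
A minimal userland model of the accounting above: the pool of openings
moves by the difference between the new and the old concurrency, clamped
at zero. Field names echo the skg_* fields but the struct is purely
illustrative:

#include <stdio.h>

struct toy_skg {
	int concurrency;
	int avail_openings;
};

static void
toy_set_concurrency(struct toy_skg *skg, int concurrency)
{
	int delta = concurrency - skg->concurrency;	/* old value first */

	skg->concurrency = concurrency;
	skg->avail_openings += delta;
	if (skg->avail_openings < 0)
		skg->avail_openings = 0;
	/* a real implementation would call slot_fill() when delta > 0 */
}

int
main(void)
{
	struct toy_skg skg = { 1, 0 };	/* one slot, already in use */

	toy_set_concurrency(&skg, 4);	/* e.g. first thr_create on 4 CPUs */
	printf("concurrency %d, openings %d\n",
	    skg.concurrency, skg.avail_openings);
	return (0);
}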
@@ -1394,7 +1399,7 @@
 system run queue, as those that were on it are actively running, or because
 there are no threads queued; in that case the pointer is NULL.
 
-/*** warning.. nmo longer true with multiple run queues ***/
+*** warning.. no longer true with multiple run queues ***
 When a thread is removed from the run queue to become run, we know
 it was the highest priority thread in the queue (at the head
 of the queue). If it is also the last assigned we know M was 1 and must
@@ -1508,14 +1513,14 @@
 #endif
 
 	if (std != NULL) {
-		KASSERT(std->std_proc->p_sflag & PS_INMEM,
-		    ("sched_choose: process swapped out"));
 		runq_remove(rq, std);
 		std->std_state = STDS_THREAD;
 		td = std->std_thread;
-		kg = std->std_ksegrp;
+		KASSERT(td->td_proc->p_sflag & PS_INMEM,
+		    ("choosethread: process swapped out"));
+		kg = td->td_ksegrp;
 		kg->kg_runq_threads--;
-		if (td->td_proc->p_flag & P_SA) {
+		if (td->td_proc->p_flag & P_HADTHREADS) {
 			if (kg->kg_last_assigned == td) {
 				kg->kg_last_assigned = TAILQ_PREV(td,
 				    threadqueue, td_runq);
@@ -1558,12 +1563,12 @@
  *  remrunqueue()  (local) (commented out)
  */
 static void
-recycle_slot(struct ksegrp *kg)
+slot_fill(struct ksegrp *kg)
 {
 	struct thread *td;
 
-	while (kg->kg_avail_opennings) {
-		mtx_assert(&sched_lock, MA_OWNED);
+	mtx_assert(&sched_lock, MA_OWNED);
+	while (kg->kg_avail_opennings > 0) {
 		/*
 		 * Find the first unassigned thread
 		 */
@@ -1579,21 +1584,20 @@
 			kg->kg_last_assigned = td;
 			kg->kg_avail_opennings--;
 			sched_add(td);
-			CTR2(KTR_RUNQ, "recycle_slot: std%p -> td%p", std, td);
+			CTR1(KTR_RUNQ, "slot_fill: td%p", td);
 		} else {
 			/* no threads to use up the slots. quit now */
-			break
+			break;
 		}
 	}
-	return;
 }
 
 #if 0
 /*
  * Remove a thread from its KSEGRP's run queue.
- * This in turn may remove it from a KSE if it was already assigned
- * to one, possibly causing a new thread to be assigned to the KSE
- * and the KSE getting a new priority.
+ * This in turn may remove it from the system run queue if it was
+ * on it, possibly causing a new thread to be assigned to the slot
+ * with a different priority.
  * Called from:
  *  Not used
  */
@@ -1614,9 +1618,10 @@
 	/*
 	 * If it is not a threaded process, take the shortcut.
 	 */
-	if ((td->td_proc->p_flag & P_SA) == 0) {
+	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
 		/* Bring its kse with it, leave the thread attached */
 		sched_rem(td);
+		kg->kg_avail_opennings++;
 		return;
 	}
    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
@@ -1635,7 +1640,7 @@
 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
 		if (td2 == td) 
 			kg->kg_last_assigned = td3;
-		recycle_slot(kg);
+		slot_fill(kg);
 	}
 }
 #endif
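
As the long comment further up describes, kg_last_assigned points at the
last thread on the ksegrp run queue that also holds a system run queue
slot, so slot_fill()'s search for the next beneficiary is just "the thread
after it, or the head if none is assigned yet". A toy list walk of that
search (the kernel uses TAILQ macros instead):

#include <stdio.h>
#include <stddef.h>

/* illustrative thread node; priority order, head = highest priority */
struct toy_td {
	const char *name;
	struct toy_td *next;
};

/* the first unassigned thread follows last_assigned, or is the head */
static struct toy_td *
first_unassigned(struct toy_td *head, struct toy_td *last_assigned)
{
	if (last_assigned == NULL)
		return (head);
	return (last_assigned->next);
}

int
main(void)
{
	struct toy_td c = { "tdC", NULL };
	struct toy_td b = { "tdB", &c };
	struct toy_td a = { "tdA", &b };
	struct toy_td *next;

	/* tdA and tdB hold slots; tdC waits for one */
	next = first_unassigned(&a, &b);
	printf("next slot goes to %s\n", next != NULL ? next->name : "(none)");
	return (0);
}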
@@ -1659,8 +1664,7 @@
 	/*
 	 * If it is not a threaded process, take the shortcut.
 	 */
-	if ((td->td_proc->p_flag & P_SA) == 0) {
-		/* We only care about the kse in the run queue. */
+	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
 		td->td_priority = newpri;
 		if (std->std_rqindex != (newpri / RQ_PPQ)) {
 			sched_rem(td);
@@ -1709,7 +1713,6 @@
 void
 setrunqueue(struct thread *td)
 {
-	struct td_sched *std;
 	struct ksegrp *kg;
 	struct thread *td2;
 	struct thread *tda;
@@ -1721,12 +1724,13 @@
 	TD_SET_RUNQ(td);
 	kg = td->td_ksegrp;
 	kg->kg_runnable++;
-	if ((td->td_proc->p_flag & P_SA) == 0) {
+	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
 		/*
 		 * Common path optimisation: Only one of everything
 		 * and the KSE is always already attached.
 		 * Totally ignore the ksegrp run queue.
 		 */
+		kg->kg_avail_opennings--;
 		sched_add(td);
 		return;
 	}
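
With sched_fork() above now starting a ksegrp with one opening instead of
zero, the unthreaded fast path has to consume that opening when the thread
goes onto the system run queue. An illustrative check of the balance this
is presumably meant to keep (openings = concurrency - slots held; my
reading, not a documented invariant):

#include <assert.h>
#include <stdio.h>

struct toy_kg2 {
	int concurrency;
	int avail_openings;
	int slots_held;		/* threads on the system run queue or running */
};

/* stand-in for the unthreaded fast path in setrunqueue() above */
static void
toy_setrunqueue(struct toy_kg2 *kg)
{
	kg->avail_openings--;
	kg->slots_held++;
	assert(kg->avail_openings == kg->concurrency - kg->slots_held);
}

int
main(void)
{
	struct toy_kg2 kg = { 1, 1, 0 };	/* sched_fork() defaults */

	toy_setrunqueue(&kg);
	printf("openings %d\n", kg.avail_openings);
	return (0);
}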
@@ -1784,7 +1788,6 @@
 			 */
 			td = TAILQ_NEXT(tda, td_runq);
 			tda = kg->kg_last_assigned = td;
-			std->std_thread = td;
 		}
 		kg->kg_avail_opennings--;
 		sched_add(td);
@@ -1792,7 +1795,6 @@
 	}
 }
 
---got to here --
 
 /************************************************************************
  * Critical section marker functions					*
@@ -1927,7 +1929,7 @@
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
-	    std->std_proc, std->std_thread->td_priority, pri, rqh);
+	    std->std_thread->td_proc, std->std_thread->td_priority, pri, rqh);
 	TAILQ_INSERT_TAIL(rqh, std, std_procq);
 }
 
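
runq_add() above and runq_choose() below work against the classic bitmap
run queue: one bit per priority band marks the non-empty queues, and
choosing a thread is find-first-set followed by taking that queue's head.
A toy version using ffs() and counters in place of the per-band tailqs:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define TOY_NQS	32	/* one bit per priority band */

struct toy_runq {
	unsigned int bits;	/* bit i set => band i non-empty */
	int count[TOY_NQS];	/* stand-in for the per-band tailqs */
};

static void
toy_runq_add(struct toy_runq *rq, int pri)
{
	rq->bits |= 1u << pri;
	rq->count[pri]++;
}

/* pick the highest-priority (lowest-numbered) non-empty band */
static int
toy_runq_choose(struct toy_runq *rq)
{
	int pri;

	if (rq->bits == 0)
		return (-1);			/* nothing runnable */
	pri = ffs(rq->bits) - 1;
	if (--rq->count[pri] == 0)
		rq->bits &= ~(1u << pri);	/* band went empty */
	return (pri);
}

int
main(void)
{
	struct toy_runq rq = { 0, { 0 } };

	toy_runq_add(&rq, 20);
	toy_runq_add(&rq, 4);
	printf("chose band %d\n", toy_runq_choose(&rq));	/* prints 4 */
	return (0);
}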
@@ -1970,7 +1972,8 @@
 		std = TAILQ_FIRST(rqh);
 		KASSERT(std != NULL, ("runq_choose: no proc on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose: pri=%d kse=%p rqh=%p", pri, std, rqh);
+		    "runq_choose: pri=%d td=%p rqh=%p",
+		    pri, std->std_thread, rqh);
 		return (std);
 	}
 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
@@ -1989,7 +1992,7 @@
 	struct rqhead *rqh;
 	int pri;
 
-	KASSERT(std->std_proc->p_sflag & PS_INMEM,
+	KASSERT(std->std_thread->td_proc->p_sflag & PS_INMEM,
 		("runq_remove: process swapped out"));
 	pri = std->std_rqindex;
 	rqh = &rq->rq_queues[pri];

==== //depot/projects/nsched/sys/sys/proc.h#19 (text+ko) ====

@@ -568,7 +568,7 @@
 #define	P_CONTINUED	0x10000	/* Proc has continued from a stopped state. */
 #define	P_STOPPED_SIG	0x20000	/* Stopped due to SIGSTOP/SIGTSTP. */
 #define	P_STOPPED_TRACE	0x40000	/* Stopped because of tracing. */
-#define	P_STOPPED_SINGLE	0x80000	/* Only one thread can continue */
+#define	P_STOPPED_SINGLE 0x80000	/* Only one thread can continue */
 					/* (not to user) */
 #define	P_PROTECTED	0x100000 /* Do not kill on memory overcommit. */
 #define	P_SIGEVENT	0x200000 /* Process pending signals changed. */


