Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 12 Mar 2008 07:29:20 GMT
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 137471 for review
Message-ID:  <200803120729.m2C7TK2W012355@repoman.freebsd.org>

next in thread | raw e-mail | index | archive | help
http://perforce.freebsd.org/chv.cgi?CH=137471

Change 137471 by peter@peter_overcee on 2008/03/12 07:28:21

	switch runq back to threads. moves ts_procq back to thread.
	This likely won't compile anymore.

Affected files ...

.. //depot/projects/bike_sched/sys/kern/kern_switch.c#10 edit
.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#16 edit
.. //depot/projects/bike_sched/sys/kern/sched_ule.c#16 edit
.. //depot/projects/bike_sched/sys/sys/proc.h#10 edit
.. //depot/projects/bike_sched/sys/sys/runq.h#3 edit

Differences ...

==== //depot/projects/bike_sched/sys/kern/kern_switch.c#10 (text+ko) ====

@@ -398,38 +398,38 @@
  * corresponding status bit.
  */
 void
-runq_add(struct runq *rq, struct td_sched *ts, int flags)
+runq_add(struct runq *rq, struct thread *td, int flags)
 {
 	struct rqhead *rqh;
 	int pri;
 
-	pri = TS_TO_TD(ts)->td_priority / RQ_PPQ;
-	TS_TO_TD(ts)->td_rqindex = pri;
+	pri = td->td_priority / RQ_PPQ;
+	td->td_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
-	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
+	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED)
-		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
+		TAILQ_INSERT_HEAD(rqh, td, td_procq);
 	else
-		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
+		TAILQ_INSERT_TAIL(rqh, td, td_procq);
 }
 
 void
-runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
+runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
 {
 	struct rqhead *rqh;
 
 	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
-	TS_TO_TD(ts)->td_rqindex = pri;
+	td->td_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ke=%p pri=%d idx=%d rqh=%p",
-	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
+	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
+	    td, td->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED) {
-		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
+		TAILQ_INSERT_HEAD(rqh, td, td_procq);
 	} else {
-		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
+		TAILQ_INSERT_TAIL(rqh, td, td_procq);
 	}
 }
 /*
@@ -463,11 +463,11 @@
 /*
  * Find the highest priority process on the run queue.
  */
-struct td_sched *
+struct thread *
 runq_choose(struct runq *rq)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 
 	while ((pri = runq_findbit(rq)) != -1) {
@@ -481,44 +481,44 @@
 			 */
 			int count = runq_fuzz;
 			int cpu = PCPU_GET(cpuid);
-			struct td_sched *ts2;
-			ts2 = ts = TAILQ_FIRST(rqh);
+			struct thread *td2;
+			td2 = td = TAILQ_FIRST(rqh);
 
-			while (count-- && ts2) {
-				if (TS_TO_TD(ts)->td_lastcpu == cpu) {
-					ts = ts2;
+			while (count-- && td2) {
+				if (td->td_lastcpu == cpu) {
+					td = td2;
 					break;
 				}
-				ts2 = TAILQ_NEXT(ts2, ts_procq);
+				td2 = TAILQ_NEXT(td2, td_procq);
 			}
 		} else
 #endif
-			ts = TAILQ_FIRST(rqh);
-		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
+			td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose: no proc on busy queue"));
 		CTR3(KTR_RUNQ,
-		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
-		return (ts);
+		    "runq_choose: pri=%d td=%p rqh=%p", pri, td, rqh);
+		return (td);
 	}
 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
 
 	return (NULL);
 }
 
-struct td_sched *
+struct thread *
 runq_choose_from(struct runq *rq, u_char idx)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 
 	if ((pri = runq_findbit_from(rq, idx)) != -1) {
 		rqh = &rq->rq_queues[pri];
-		ts = TAILQ_FIRST(rqh);
-		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
+		td = TAILQ_FIRST(rqh);
+		KASSERT(td != NULL, ("runq_choose: no proc on busy queue"));
 		CTR4(KTR_RUNQ,
-		    "runq_choose_from: pri=%d ts=%p idx=%d rqh=%p",
-		    pri, ts, TS_TO_TD(ts)->td_rqindex, rqh);
-		return (ts);
+		    "runq_choose_from: pri=%d td=%p idx=%d rqh=%p",
+		    pri, td, td->td_rqindex, rqh);
+		return (td);
 	}
 	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);
 
@@ -530,36 +530,36 @@
  * Caller must set state afterwards.
  */
 void
-runq_remove(struct runq *rq, struct td_sched *ts)
+runq_remove(struct runq *rq, struct thread *td)
 {
 
-	runq_remove_idx(rq, ts, NULL);
+	runq_remove_idx(rq, td, NULL);
 }
 
 void
-runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
+runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
 {
 	struct rqhead *rqh;
 	u_char pri;
 
-	KASSERT(TS_TO_TD(ts)->td_flags & TDF_INMEM,
+	KASSERT(td->td_flags & TDF_INMEM,
 		("runq_remove_idx: thread swapped out"));
-	pri = TS_TO_TD(ts)->td_rqindex;
+	pri = td->td_rqindex;
 	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
 	rqh = &rq->rq_queues[pri];
-	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
-	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
+	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
+	    td, td->td_priority, pri, rqh);
 	{
-		struct td_sched *nts;
+		struct thread *ntd;
 
-		TAILQ_FOREACH(nts, rqh, ts_procq)
-			if (nts == ts)
+		TAILQ_FOREACH(ntd, rqh, td_procq)
+			if (ntd == td)
 				break;
-		if (ts != nts)
-			panic("runq_remove_idx: ts %p not on rqindex %d",
-			    ts, pri);
+		if (td != ntd)
+			panic("runq_remove_idx: td %p not on rqindex %d",
+			    td, pri);
 	}
-	TAILQ_REMOVE(rqh, ts, ts_procq);
+	TAILQ_REMOVE(rqh, td, td_procq);
 	if (TAILQ_EMPTY(rqh)) {
 		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
 		runq_clrbit(rq, pri);

==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#16 (text+ko) ====

@@ -79,7 +79,6 @@
  * the requirements of this scheduler
  */
 struct td_sched {
-	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	u_char		ts_flags;	/* (t) Flags */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */

==== //depot/projects/bike_sched/sys/kern/sched_ule.c#16 (text+ko) ====

@@ -82,7 +82,6 @@
  * by the thread lock.
  */
 struct td_sched {	
-	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
 	short		ts_flags;	/* TSF_* flags. */
 	u_char		ts_cpu;		/* CPU that we have affinity for. */
@@ -301,8 +300,8 @@
 static void tdq_move(struct tdq *, struct tdq *);
 static int tdq_idled(struct tdq *);
 static void tdq_notify(struct td_sched *);
-static struct td_sched *tdq_steal(struct tdq *);
-static struct td_sched *runq_steal(struct runq *);
+static struct thread *tdq_steal(struct tdq *);
+static struct thread *runq_steal(struct runq *);
 static int sched_pickcpu(struct td_sched *, int);
 static void sched_balance(void);
 static void sched_balance_groups(void);
@@ -329,7 +328,6 @@
 runq_print(struct runq *rq)
 {
 	struct rqhead *rqh;
-	struct td_sched *ts;
 	struct thread *td;
 	int pri;
 	int j;
@@ -342,8 +340,7 @@
 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
 				pri = j + (i << RQB_L2BPW);
 				rqh = &rq->rq_queues[pri];
-				TAILQ_FOREACH(ts, rqh, ts_procq) {
-					td = TS_TO_TD(ts);
+				TAILQ_FOREACH(td, rqh, td_procq) {
 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
 					    td, td->td_name, td->td_priority, td->td_rqindex, pri);
 				}
@@ -859,10 +856,10 @@
  * Steals load from a timeshare queue.  Honors the rotating queue head
  * index.
  */
-static struct td_sched *
+static struct thread *
 runq_steal_from(struct runq *rq, u_char start)
 {
-	struct td_sched *ts;
+	struct thread *td;
 	struct rqbits *rqb;
 	struct rqhead *rqh;
 	int first;
@@ -888,9 +885,9 @@
 			pri = RQB_FFS(rqb->rqb_bits[i]);
 		pri += (i << RQB_L2BPW);
 		rqh = &rq->rq_queues[pri];
-		TAILQ_FOREACH(ts, rqh, ts_procq) {
-			if (first && THREAD_CAN_MIGRATE(TS_TO_TD(ts)))
-				return (ts);
+		TAILQ_FOREACH(td, rqh, td_procq) {
+			if (first && THREAD_CAN_MIGRATE(td))
+				return (td);
 			first = 1;
 		}
 	}
@@ -905,12 +902,12 @@
 /*
  * Steals load from a standard linear queue.
  */
-static struct td_sched *
+static struct thread *
 runq_steal(struct runq *rq)
 {
 	struct rqhead *rqh;
 	struct rqbits *rqb;
-	struct td_sched *ts;
+	struct thread *td;
 	int word;
 	int bit;
 
@@ -922,9 +919,9 @@
 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
 				continue;
 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
-			TAILQ_FOREACH(ts, rqh, ts_procq)
-				if (THREAD_CAN_MIGRATE(TS_TO_TD(ts)))
-					return (ts);
+			TAILQ_FOREACH(td, rqh, td_procq)
+				if (THREAD_CAN_MIGRATE(td))
+					return (td);
 		}
 	}
 	return (NULL);
@@ -933,16 +930,16 @@
 /*
  * Attempt to steal a thread in priority order from a thread queue.
  */
-static struct td_sched *
+static struct thread *
 tdq_steal(struct tdq *tdq)
 {
-	struct td_sched *ts;
+	struct thread *td;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
-		return (ts);
-	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
-		return (ts);
+	if ((td = runq_steal(&tdq->tdq_realtime)) != NULL)
+		return (td);
+	if ((td = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
+		return (td);
 	return (runq_steal(&tdq->tdq_idle));
 }
 
@@ -951,15 +948,15 @@
  * current lock and returns with the assigned queue locked.
  */
 static inline struct tdq *
-sched_setcpu(struct td_sched *ts, int cpu, int flags)
+sched_setcpu(struct thread *td, int cpu, int flags)
 {
-	struct thread *td;
+	struct td_sched *ts;
 	struct tdq *tdq;
 
-	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
+	THREAD_LOCK_ASSERT(td, MA_OWNED);
 
 	tdq = TDQ_CPU(cpu);
-	td = TS_TO_TD(ts);
+	ts = TD_TO_TS(td);
 	ts->ts_cpu = cpu;
 
 	/* If the lock matches just return the queue. */

==== //depot/projects/bike_sched/sys/sys/proc.h#10 (text+ko) ====

@@ -190,6 +190,7 @@
 	/* The two queues below should someday be merged. */
 	TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
 	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
+	TAILQ_ENTRY(thread) td_procq;	/* (j/z) Run queue. */
 
 	TAILQ_HEAD(, selinfo) td_selq;	/* (p) List of selinfos. */
 	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */

==== //depot/projects/bike_sched/sys/sys/runq.h#3 (text+ko) ====

@@ -31,7 +31,7 @@
 
 #include <machine/runq.h>
 
-struct td_sched;
+struct thread;
 
 /*
  * Run queue parameters.
@@ -43,7 +43,7 @@
 /*
  * Head of run queues.
  */
-TAILQ_HEAD(rqhead, td_sched);
+TAILQ_HEAD(rqhead, thread);
 
 /*
  * Bit array which maintains the status of a run queue.  When a queue is
@@ -62,13 +62,13 @@
 	struct	rqhead rq_queues[RQ_NQS];
 };
 
-void	runq_add(struct runq *, struct td_sched *, int);
-void	runq_add_pri(struct runq *, struct td_sched *, u_char, int);
+void	runq_add(struct runq *, struct thread *, int);
+void	runq_add_pri(struct runq *, struct thread *, u_char, int);
 int	runq_check(struct runq *);
-struct	td_sched *runq_choose(struct runq *);
-struct	td_sched *runq_choose_from(struct runq *, u_char);
+struct	thread *runq_choose(struct runq *);
+struct	thread *runq_choose_from(struct runq *, u_char);
 void	runq_init(struct runq *);
-void	runq_remove(struct runq *, struct td_sched *);
-void	runq_remove_idx(struct runq *, struct td_sched *, u_char *);
+void	runq_remove(struct runq *, struct thread *);
+void	runq_remove_idx(struct runq *, struct thread *, u_char *);
 
 #endif



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200803120729.m2C7TK2W012355>