Date:      Sun, 2 Dec 2007 04:17:20 GMT
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 129957 for review
Message-ID:  <200712020417.lB24HKB3053895@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=129957

Change 129957 by peter@peter_daintree on 2007/12/02 04:16:30

	Eliminate the cross-pointers between the thread and the scheduler-specific
	data area.  Use pointer arithmetic (since it's essentially free, maybe
	even evaluatable at compile time!) rather than pointer dereferences
	(expensive, always evaluated at runtime) to get from one to the other.
	
	The trick is handling thread0, which is statically allocated rather than
	coming from a uma zone with the adjacent td_sched.  I've hacked it so
	that thread0 is dereferenced via a pointer.  Fortunately, references
	to thread0 are very rare, so the tradeoff should be very positive.
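	
	[Editor's note: a minimal, standalone sketch of the adjacency trick the
	patch introduces.  The struct bodies below are trivial stand-ins, not
	the real kernel structures; the TD_TO_TS/TS_TO_TD macros, the static
	sched0 blob, thread0p, and sched_init_thread0() mirror what the diff
	adds to sched_4bsd.c/sched_ule.c and sys/proc.h.]
	
	#include <assert.h>
	#include <stdio.h>
	
	struct thread { int td_priority; };	/* stand-in for the real struct */
	struct td_sched { int ts_rqindex; };	/* stand-in for the real struct */
	
	/* Same macros the patch adds: the td_sched sits right after the thread. */
	#define TD_TO_TS(td) ((struct td_sched *)(&(td)[1]))
	#define TS_TO_TD(ts) (&((struct thread *)(ts))[-1])
	
	/* Mirrors the static sched0 blob that replaces td_sched0 for thread0. */
	static struct {
		struct thread	initial_thread;
		struct td_sched	initial_sched;
	} sched0;
	
	static struct thread *thread0p;
	#define thread0 (*thread0p)	/* as in the sys/proc.h change */
	
	static void
	sched_init_thread0(void)
	{
		thread0p = &sched0.initial_thread;
	}
	
	int
	main(void)
	{
		sched_init_thread0();
		/* Pure address arithmetic: the two macros invert each other. */
		assert(TD_TO_TS(&thread0) == &sched0.initial_sched);
		assert(TS_TO_TD(TD_TO_TS(&thread0)) == &thread0);
		printf("thread0 at %p, its td_sched at %p\n",
		    (void *)&thread0, (void *)TD_TO_TS(&thread0));
		return (0);
	}
	
	In the kernel the same layout comes from the thread UMA zone, whose item
	size is sched_sizeof_thread() = sizeof(struct thread) +
	sizeof(struct td_sched), so the arithmetic holds for every thread;
	thread0 is the one exception and is handled by pointing thread0p at the
	statically allocated sched0 pair.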

Affected files ...

.. //depot/projects/bike_sched/sys/amd64/amd64/machdep.c#6 edit
.. //depot/projects/bike_sched/sys/arm/at91/kb920x_machdep.c#7 edit
.. //depot/projects/bike_sched/sys/arm/sa11x0/assabet_machdep.c#6 edit
.. //depot/projects/bike_sched/sys/i386/i386/machdep.c#6 edit
.. //depot/projects/bike_sched/sys/ia64/ia64/machdep.c#6 edit
.. //depot/projects/bike_sched/sys/kern/init_main.c#4 edit
.. //depot/projects/bike_sched/sys/kern/kern_switch.c#8 edit
.. //depot/projects/bike_sched/sys/kern/sched_4bsd.c#14 edit
.. //depot/projects/bike_sched/sys/kern/sched_ule.c#14 edit
.. //depot/projects/bike_sched/sys/pc98/pc98/machdep.c#7 edit
.. //depot/projects/bike_sched/sys/powerpc/powerpc/machdep.c#5 edit
.. //depot/projects/bike_sched/sys/sparc64/sparc64/machdep.c#5 edit
.. //depot/projects/bike_sched/sys/sun4v/sun4v/machdep.c#2 edit
.. //depot/projects/bike_sched/sys/sys/proc.h#8 edit
.. //depot/projects/bike_sched/sys/sys/sched.h#7 edit

Differences ...

==== //depot/projects/bike_sched/sys/amd64/amd64/machdep.c#6 (text+ko) ====

@@ -1126,6 +1126,7 @@
 	u_int64_t msr;
 	char *env;
 
+	sched_init_thread0();
 	thread0.td_kstack = physfree + KERNBASE;
 	bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
 	physfree += KSTACK_PAGES * PAGE_SIZE;

==== //depot/projects/bike_sched/sys/arm/at91/kb920x_machdep.c#7 (text) ====

@@ -449,6 +449,7 @@
 	undefined_handler_address = (u_int)undefinedinstruction_bounce;
 	undefined_init();
 				
+	sched_init_thread0();
 	proc_linkup0(&proc0, &thread0);
 	thread0.td_kstack = kernelstack.pv_va;
 	thread0.td_pcb = (struct pcb *)

==== //depot/projects/bike_sched/sys/arm/sa11x0/assabet_machdep.c#6 (text+ko) ====

@@ -422,6 +422,7 @@
 
 	/* Set stack for exception handlers */
 	
+	sched_init_thread0();
 	proc_linkup0(&proc0, &thread0);
 	thread0.td_kstack = kernelstack.pv_va;
 	thread0.td_pcb = (struct pcb *)

==== //depot/projects/bike_sched/sys/i386/i386/machdep.c#6 (text+ko) ====

@@ -2077,6 +2077,7 @@
 	int gsel_tss, metadata_missing, x;
 	struct pcpu *pc;
 
+	sched_init_thread0();
 	thread0.td_kstack = proc0kstack;
 	thread0.td_pcb = (struct pcb *)
 	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

==== //depot/projects/bike_sched/sys/ia64/ia64/machdep.c#6 (text+ko) ====

@@ -628,6 +628,7 @@
 	if (boothowto & RB_VERBOSE)
 		bootverbose = 1;
 
+	sched_init_thread0();
 	/*
 	 * Setup the PCPU data for the bootstrap processor. It is needed
 	 * by printf(). Also, since printf() has critical sections, we

==== //depot/projects/bike_sched/sys/kern/init_main.c#4 (text+ko) ====

@@ -94,7 +94,7 @@
 static struct session session0;
 static struct pgrp pgrp0;
 struct	proc proc0;
-struct	thread thread0 __aligned(16);
+struct	thread *thread0p;
 struct	vmspace vmspace0;
 struct	proc *initproc;
 

==== //depot/projects/bike_sched/sys/kern/kern_switch.c#8 (text+ko) ====

@@ -231,8 +231,6 @@
 	 */
 	ctd = curthread;
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
-	  ("thread has no (or wrong) sched-private part."));
 	KASSERT((td->td_inhibitors == 0),
 			("maybe_preempt: trying to run inhibited thread"));
 	pri = td->td_priority;
@@ -405,12 +403,12 @@
 	struct rqhead *rqh;
 	int pri;
 
-	pri = ts->ts_thread->td_priority / RQ_PPQ;
+	pri = TS_TO_TD(ts)->td_priority / RQ_PPQ;
 	ts->ts_rqindex = pri;
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
 	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED)
 		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
 	else
@@ -427,7 +425,7 @@
 	runq_setbit(rq, pri);
 	rqh = &rq->rq_queues[pri];
 	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ke=%p pri=%d idx=%d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
 	if (flags & SRQ_PREEMPTED) {
 		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
 	} else {
@@ -487,7 +485,7 @@
 			ts2 = ts = TAILQ_FIRST(rqh);
 
 			while (count-- && ts2) {
-				if (ts->ts_thread->td_lastcpu == cpu) {
+				if (TS_TO_TD(ts)->td_lastcpu == cpu) {
 					ts = ts2;
 					break;
 				}
@@ -544,13 +542,13 @@
 	struct rqhead *rqh;
 	u_char pri;
 
-	KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+	KASSERT(TS_TO_TD(ts)->td_flags & TDF_INMEM,
 		("runq_remove_idx: thread swapped out"));
 	pri = ts->ts_rqindex;
 	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
 	rqh = &rq->rq_queues[pri];
 	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
-	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
+	    TS_TO_TD(ts), ts, TS_TO_TD(ts)->td_priority, pri, rqh);
 	{
 		struct td_sched *nts;
 
@@ -586,10 +584,8 @@
 {
 	struct td_sched *ts;
 
-	ts = (struct td_sched *) (td + 1);
+	ts = TD_TO_TS(td);
 	bzero(ts, sizeof(*ts));
-	td->td_sched     = ts;
-	ts->ts_thread	= td;
 }
 
 #endif /* KERN_SWITCH_INCLUDE */

==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#14 (text+ko) ====

@@ -80,28 +80,30 @@
  */
 struct td_sched {
 	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
-	struct thread	*ts_thread;	/* (*) Active associated thread. */
 	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
 	u_char		ts_rqindex;	/* (j) Run queue index. */
+	u_char		ts_flags;	/* (t) Flags */
 	int		ts_cpticks;	/* (j) Ticks of cpu time. */
 	int		ts_slptime;	/* (j) Seconds !RUNNING. */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 };
 
-/* flags kept in td_flags */
-#define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
-#define TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
-#define TDF_BOUND	TDF_SCHED2
+#define TSF_DIDRUN	0x01	/* thread actually ran. */
+#define TSF_EXIT	0x02	/* thread is being killed. */
+#define TSF_BOUND	0x04	/* stuck to one CPU */
 
-#define ts_flags	ts_thread->td_flags
-#define TSF_DIDRUN	TDF_DIDRUN /* thread actually ran. */
-#define TSF_EXIT	TDF_EXIT /* thread is being killed. */
-#define TSF_BOUND	TDF_BOUND /* stuck to one CPU */
-
 #define SKE_RUNQ_PCPU(ts)						\
     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
 
-static struct td_sched td_sched0;
+#define TD_TO_TS(td) ((struct td_sched *)(&(td)[1]))
+#define TS_TO_TD(ts) (&((struct thread *)(ts))[-1])
+
+/* Packed structure to match the layout of the uma thread zone */
+static struct {
+	struct thread	initial_thread;
+	struct td_sched	initial_sched;
+} sched0 __aligned(16);
+
 struct mtx sched_lock;
 
 static int	sched_tdcnt;	/* Total runnable threads in the system. */
@@ -359,7 +361,7 @@
 		FOREACH_THREAD_IN_PROC(p, td) { 
 			awake = 0;
 			thread_lock(td);
-			ts = td->td_sched;
+			ts = TD_TO_TS(td);
 			/*
 			 * Increment sleep time (if sleeping).  We
 			 * ignore overflow, as above.
@@ -465,7 +467,7 @@
 	fixpt_t loadfac;
 	unsigned int newcpu;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	loadfac = loadfactor(averunnable.ldavg[0]);
 	if (ts->ts_slptime > 5 * loadfac)
 		td->td_estcpu = 0;
@@ -544,9 +546,7 @@
 	 * Set up the scheduler specific parts of proc0.
 	 */
 	proc0.p_sched = NULL; /* XXX */
-	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
-	td_sched0.ts_thread = &thread0;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
 }
 
@@ -588,7 +588,7 @@
 	struct td_sched *ts;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 
 	ts->ts_cpticks++;
 	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
@@ -672,7 +672,7 @@
 		return;
 	td->td_priority = prio;
 	if (TD_ON_RUNQ(td) && 
-	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
+	    TD_TO_TS(td)->ts_rqindex != (prio / RQ_PPQ)) {
 		sched_rem(td);
 		sched_add(td, SRQ_BORING);
 	}
@@ -790,7 +790,7 @@
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	td->td_slptick = ticks;
-	td->td_sched->ts_slptime = 0;
+	TD_TO_TS(td)->ts_slptime = 0;
 }
 
 void
@@ -799,7 +799,7 @@
 	struct td_sched *ts;
 	struct proc *p;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	p = td->td_proc;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -852,7 +852,7 @@
 		 */
 		KASSERT((newtd->td_inhibitors == 0),
 			("trying to run inhibited thread"));
-		newtd->td_sched->ts_flags |= TSF_DIDRUN;
+		TD_TO_TS(newtd)->ts_flags |= TSF_DIDRUN;
         	TD_SET_RUNNING(newtd);
 		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
 			sched_load_add();
@@ -905,7 +905,7 @@
 	struct td_sched *ts;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	if (ts->ts_slptime > 1) {
 		updatepri(td);
 		resetpriority(td);
@@ -1047,7 +1047,7 @@
 	int cpu;
 	int single_cpu = 0;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
 	    ("sched_add: trying to run inhibited thread"));
@@ -1116,7 +1116,7 @@
 #else /* SMP */
 {
 	struct td_sched *ts;
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT((td->td_inhibitors == 0),
 	    ("sched_add: trying to run inhibited thread"));
@@ -1172,7 +1172,7 @@
 {
 	struct td_sched *ts;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_rem: thread swapped out"));
 	KASSERT(TD_ON_RUNQ(td),
@@ -1208,7 +1208,7 @@
 
 	if (ts == NULL || 
 	    (kecpu != NULL && 
-	     kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
+	     TS_TO_TD(kecpu)->td_priority < TS_TO_TD(ts)->td_priority)) {
 		CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
 		     PCPU_GET(cpuid));
 		ts = kecpu;
@@ -1226,9 +1226,9 @@
 		runq_remove(rq, ts);
 		ts->ts_flags |= TSF_DIDRUN;
 
-		KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
+		KASSERT(TS_TO_TD(ts)->td_flags & TDF_INMEM,
 		    ("sched_choose: thread swapped out"));
-		return (ts->ts_thread);
+		return (TS_TO_TD(ts));
 	} 
 	return (PCPU_GET(idlethread));
 }
@@ -1242,7 +1242,7 @@
 	KASSERT(TD_IS_RUNNING(td),
 	    ("sched_bind: cannot bind non-running thread"));
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 
 	ts->ts_flags |= TSF_BOUND;
 #ifdef SMP
@@ -1258,14 +1258,14 @@
 sched_unbind(struct thread* td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	td->td_sched->ts_flags &= ~TSF_BOUND;
+	TD_TO_TS(td)->ts_flags &= ~TSF_BOUND;
 }
 
 int
 sched_is_bound(struct thread *td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	return (td->td_sched->ts_flags & TSF_BOUND);
+	return (TD_TO_TS(td)->ts_flags & TSF_BOUND);
 }
 
 void
@@ -1295,13 +1295,20 @@
 	return (sizeof(struct thread) + sizeof(struct td_sched));
 }
 
+/*
+ * Early boot support.  Make thread0 a viable entity.
+ */
+void
+sched_init_thread0(void)
+{
+	
+	thread0p = &sched0.initial_thread;
+}
+
 fixpt_t
 sched_pctcpu(struct thread *td)
 {
-	struct td_sched *ts;
-
-	ts = td->td_sched;
-	return (ts->ts_pctcpu);
+	return (TD_TO_TS(td)->ts_pctcpu);
 }
 
 void

==== //depot/projects/bike_sched/sys/kern/sched_ule.c#14 (text+ko) ====

@@ -83,7 +83,6 @@
  */
 struct td_sched {	
 	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
-	struct thread	*ts_thread;	/* Active associated thread. */
 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
 	short		ts_flags;	/* TSF_* flags. */
 	u_char		ts_rqindex;	/* Run queue index. */
@@ -103,7 +102,13 @@
 #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
 #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
 
-static struct td_sched td_sched0;
+#define TD_TO_TS(td) ((struct td_sched *)(&(td)[1]))
+#define TS_TO_TD(ts) (&((struct thread *)(ts))[-1])
+
+static struct {
+	struct thread	initial_thread;
+	struct td_sched	initial_sched;
+} sched0 __aligned(16);
 
 /*
  * Cpu percentage computation macros and defines.
@@ -326,6 +331,7 @@
 {
 	struct rqhead *rqh;
 	struct td_sched *ts;
+	struct thread *td;
 	int pri;
 	int j;
 	int i;
@@ -338,8 +344,9 @@
 				pri = j + (i << RQB_L2BPW);
 				rqh = &rq->rq_queues[pri];
 				TAILQ_FOREACH(ts, rqh, ts_procq) {
+					td = TS_TO_TD(ts);
 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
-					    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
+					    td, td->td_name, td->td_priority, ts->ts_rqindex, pri);
 				}
 			}
 	}
@@ -384,9 +391,9 @@
 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
 {
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
 #ifdef SMP
-	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
+	if (THREAD_CAN_MIGRATE(TS_TO_TD(ts))) {
 		tdq->tdq_transferable++;
 		tdq->tdq_group->tdg_transferable++;
 		ts->ts_flags |= TSF_XFERABLE;
@@ -395,7 +402,7 @@
 	if (ts->ts_runq == &tdq->tdq_timeshare) {
 		u_char pri;
 
-		pri = ts->ts_thread->td_priority;
+		pri = TS_TO_TD(ts)->td_priority;
 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
 			("Invalid priority %d on timeshare runq", pri));
 		/*
@@ -430,7 +437,7 @@
 {
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	KASSERT(ts->ts_runq != NULL,
-	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
+	    ("tdq_runq_remove: thread %p null ts_runq", TS_TO_TD(ts)));
 #ifdef SMP
 	if (ts->ts_flags & TSF_XFERABLE) {
 		tdq->tdq_transferable--;
@@ -449,7 +456,7 @@
 		 */
 		ts->ts_ltick = ticks;
 		sched_pctcpu_update(ts);
-		sched_priority(ts->ts_thread);
+		sched_priority(TS_TO_TD(ts));
 	} else
 		runq_remove(ts->ts_runq, ts);
 }
@@ -464,12 +471,12 @@
 	int class;
 
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
-	class = PRI_BASE(ts->ts_thread->td_pri_class);
+	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
+	class = PRI_BASE(TS_TO_TD(ts)->td_pri_class);
 	tdq->tdq_load++;
 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
 	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	    (TS_TO_TD(ts)->td_proc->p_flag & P_NOLOAD) == 0)
 #ifdef SMP
 		tdq->tdq_group->tdg_load++;
 #else
@@ -486,11 +493,11 @@
 {
 	int class;
 
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-	class = PRI_BASE(ts->ts_thread->td_pri_class);
+	class = PRI_BASE(TS_TO_TD(ts)->td_pri_class);
 	if (class != PRI_ITHD &&
-	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
+	    (TS_TO_TD(ts)->td_proc->p_flag & P_NOLOAD) == 0)
 #ifdef SMP
 		tdq->tdq_group->tdg_load--;
 #else
@@ -715,7 +722,7 @@
 	}
 	if (tdq == to)
 		return;
-	td = ts->ts_thread;
+	td = TS_TO_TD(ts);
 	/*
 	 * Although the run queue is locked the thread may be blocked.  Lock
 	 * it to clear this and acquire the run-queue lock.
@@ -817,7 +824,7 @@
 	int cpu;
 
 	cpu = ts->ts_cpu;
-	pri = ts->ts_thread->td_priority;
+	pri = TS_TO_TD(ts)->td_priority;
 	pcpu = pcpu_find(cpu);
 	ctd = pcpu->pc_curthread;
 	cpri = ctd->td_priority;
@@ -883,7 +890,7 @@
 		pri += (i << RQB_L2BPW);
 		rqh = &rq->rq_queues[pri];
 		TAILQ_FOREACH(ts, rqh, ts_procq) {
-			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
+			if (first && THREAD_CAN_MIGRATE(TS_TO_TD(ts)))
 				return (ts);
 			first = 1;
 		}
@@ -917,7 +924,7 @@
 				continue;
 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
 			TAILQ_FOREACH(ts, rqh, ts_procq)
-				if (THREAD_CAN_MIGRATE(ts->ts_thread))
+				if (THREAD_CAN_MIGRATE(TS_TO_TD(ts)))
 					return (ts);
 		}
 	}
@@ -950,10 +957,10 @@
 	struct thread *td;
 	struct tdq *tdq;
 
-	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
+	THREAD_LOCK_ASSERT(TS_TO_TD(ts), MA_OWNED);
 
 	tdq = TDQ_CPU(cpu);
-	td = ts->ts_thread;
+	td = TS_TO_TD(ts);
 	ts->ts_cpu = cpu;
 
 	/* If the lock matches just return the queue. */
@@ -1078,7 +1085,7 @@
 		    curthread->td_priority);
 		return (self);
 	}
-	pri = ts->ts_thread->td_priority;
+	pri = TS_TO_TD(ts)->td_priority;
 	cpu = ts->ts_cpu;
 	/*
 	 * Regardless of affinity, if the last cpu is idle send it there.
@@ -1145,17 +1152,17 @@
 		return (ts);
 	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
 	if (ts != NULL) {
-		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
+		KASSERT(TS_TO_TD(ts)->td_priority >= PRI_MIN_TIMESHARE,
 		    ("tdq_choose: Invalid priority on timeshare queue %d",
-		    ts->ts_thread->td_priority));
+		    TS_TO_TD(ts)->td_priority));
 		return (ts);
 	}
 
 	ts = runq_choose(&tdq->tdq_idle);
 	if (ts != NULL) {
-		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
+		KASSERT(TS_TO_TD(ts)->td_priority >= PRI_MIN_IDLE,
 		    ("tdq_choose: Invalid priority on idle queue %d",
-		    ts->ts_thread->td_priority));
+		    TS_TO_TD(ts)->td_priority));
 		return (ts);
 	}
 
@@ -1330,7 +1337,7 @@
 	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
-	tdq_load_add(tdq, &td_sched0);
+	tdq_load_add(tdq, TD_TO_TS(&thread0));
 	TDQ_UNLOCK(tdq);
 }
 
@@ -1388,7 +1395,7 @@
 	struct td_sched *ts;
 	int div;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	/*
 	 * The score is only needed if this is likely to be an interactive
 	 * task.  Don't go through the expense of computing it if there's
@@ -1425,6 +1432,7 @@
 static void
 sched_priority(struct thread *td)
 {
+	struct td_sched *ts;
 	int score;
 	int pri;
 
@@ -1453,15 +1461,15 @@
 		    pri, score));
 	} else {
 		pri = SCHED_PRI_MIN;
-		if (td->td_sched->ts_ticks)
-			pri += SCHED_PRI_TICKS(td->td_sched);
+		ts = TD_TO_TS(td);
+		if (ts->ts_ticks)
+			pri += SCHED_PRI_TICKS(ts);
 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
 		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
 		    ("sched_priority: invalid priority %d: nice %d, " 
 		    "ticks %d ftick %d ltick %d tick pri %d",
-		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
-		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
-		    SCHED_PRI_TICKS(td->td_sched)));
+		    pri, td->td_proc->p_nice, ts->ts_ticks,
+		    ts->ts_ftick, ts->ts_ltick, SCHED_PRI_TICKS(ts)));
 	}
 	sched_user_prio(td, pri);
 
@@ -1479,7 +1487,7 @@
 	struct td_sched *ts;
 	u_int sum;
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	sum = ts->ts_runtime + ts->ts_slptime;
 	if (sum < SCHED_SLP_RUN_MAX)
 		return;
@@ -1521,14 +1529,16 @@
 static void
 sched_interact_fork(struct thread *td)
 {
+	struct td_sched *ts;
 	int ratio;
 	int sum;
 
-	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
+	ts = TD_TO_TS(td);
+	sum = ts->ts_runtime + ts->ts_slptime;
 	if (sum > SCHED_SLP_RUN_FORK) {
 		ratio = sum / SCHED_SLP_RUN_FORK;
-		td->td_sched->ts_runtime /= ratio;
-		td->td_sched->ts_slptime /= ratio;
+		ts->ts_runtime /= ratio;
+		ts->ts_slptime /= ratio;
 	}
 }
 
@@ -1543,10 +1553,8 @@
 	 * Set up the scheduler specific parts of proc0.
 	 */
 	proc0.p_sched = NULL; /* XXX */
-	thread0.td_sched = &td_sched0;
-	td_sched0.ts_ltick = ticks;
-	td_sched0.ts_ftick = ticks;
-	td_sched0.ts_thread = &thread0;
+	TD_TO_TS(&thread0)->ts_ltick = ticks;
+	TD_TO_TS(&thread0)->ts_ftick = ticks;
 }
 
 /*
@@ -1602,7 +1610,7 @@
 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
 	    td, td->td_name, td->td_priority, prio, curthread,
 	    curthread->td_name);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	if (td->td_priority == prio)
 		return;
@@ -1753,7 +1761,7 @@
 	TDQ_UNLOCK(tdq);
 	thread_lock(td);
 	spinlock_exit();
-	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
+	sched_setcpu(TD_TO_TS(td), TDQ_ID(tdq), SRQ_YIELDING);
 #else
 	td->td_lock = TDQ_LOCKPTR(tdq);
 #endif
@@ -1770,7 +1778,7 @@
 {
 	struct tdq *tdn;
 
-	tdn = TDQ_CPU(td->td_sched->ts_cpu);
+	tdn = TDQ_CPU(TD_TO_TS(td)->ts_cpu);
 #ifdef SMP
 	/*
 	 * Do the lock dance required to avoid LOR.  We grab an extra
@@ -1781,7 +1789,7 @@
 	thread_block_switch(td);	/* This releases the lock on tdq. */
 	TDQ_LOCK(tdn);
 	tdq_add(tdn, td, flags);
-	tdq_notify(td->td_sched);
+	tdq_notify(TD_TO_TS(td));
 	/*
 	 * After we unlock tdn the new cpu still can't switch into this
 	 * thread until we've unblocked it in cpu_switch().  The lock
@@ -1843,7 +1851,7 @@
 
 	cpuid = PCPU_GET(cpuid);
 	tdq = TDQ_CPU(cpuid);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	mtx = td->td_lock;
 #ifdef SMP
 	ts->ts_rltick = ticks;
@@ -1968,7 +1976,7 @@
 	int slptick;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	/*
 	 * If we slept for more than a tick update our interactivity and
 	 * priority.
@@ -2005,8 +2013,8 @@
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	sched_newthread(child);
 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
-	ts = td->td_sched;
-	ts2 = child->td_sched;
+	ts = TD_TO_TS(td);
+	ts2 = TD_TO_TS(child);
 	ts2->ts_cpu = ts->ts_cpu;
 	ts2->ts_runq = NULL;
 	/*
@@ -2028,7 +2036,7 @@
 	 */
 	sched_interact_fork(child);
 	sched_priority(child);
-	td->td_sched->ts_runtime += tickincr;
+	TD_TO_TS(td)->ts_runtime += tickincr;
 	sched_interact_update(td);
 	sched_priority(td);
 }
@@ -2053,7 +2061,7 @@
 	if (TD_ON_RUNQ(td)) {
 		struct tdq *tdq;
 
-		tdq = TDQ_CPU(td->td_sched->ts_cpu);
+		tdq = TDQ_CPU(TD_TO_TS(td)->ts_cpu);
 		if (THREAD_CAN_MIGRATE(td)) {
 			tdq->tdq_transferable--;
 			tdq->tdq_group->tdg_transferable--;
@@ -2091,7 +2099,7 @@
 	 * launch expensive things to mark their children as expensive.
 	 */
 	thread_lock(td);
-	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
+	TD_TO_TS(td)->ts_runtime += TD_TO_TS(child)->ts_runtime;
 	sched_interact_update(td);
 	sched_priority(td);
 	thread_unlock(td);
@@ -2129,7 +2137,7 @@
 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
 			tdq->tdq_ridx = tdq->tdq_idx;
 	}
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	/*
 	 * We only do slicing code for TIMESHARE threads.
 	 */
@@ -2139,7 +2147,7 @@
 	 * We used a tick; charge it to the thread so that we can compute our
 	 * interactivity.
 	 */
-	td->td_sched->ts_runtime += tickincr;
+	ts->ts_runtime += tickincr;
 	sched_interact_update(td);
 	/*
 	 * We used up one time slice.
@@ -2162,7 +2170,7 @@
 {
 	struct td_sched *ts;
 
-	ts = curthread->td_sched;
+	ts = TD_TO_TS(curthread);
 	/* Adjust ticks for pctcpu */
 	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
 	ts->ts_ltick = ticks;
@@ -2217,7 +2225,7 @@
 	ts = tdq_choose(tdq);
 	if (ts) {
 		tdq_runq_rem(tdq, ts);
-		return (ts->ts_thread);
+		return (TS_TO_TD(ts));
 	}
 #ifdef SMP
 	/*
@@ -2284,7 +2292,7 @@
 	KASSERT(td->td_flags & TDF_INMEM,
 	    ("sched_add: thread swapped out"));
 
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	class = PRI_BASE(td->td_pri_class);
         TD_SET_RUNQ(td);
 	if (ts->ts_slice == 0)
@@ -2342,7 +2350,7 @@
 	    td, td->td_name, td->td_priority, curthread,
 	    curthread->td_name);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	/*
 	 * Recalculate the priority before we select the target cpu or
 	 * run-queue.
@@ -2401,7 +2409,7 @@
 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
 	    td, td->td_name, td->td_priority, curthread,
 	    curthread->td_name);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	tdq = TDQ_CPU(ts->ts_cpu);
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
@@ -2422,7 +2430,7 @@
 	struct td_sched *ts;
 
 	pctcpu = 0;
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	if (ts == NULL)
 		return (0);
 
@@ -2449,7 +2457,7 @@
 	struct td_sched *ts;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	if (ts->ts_flags & TSF_BOUND)
 		sched_unbind(td);
 	ts->ts_flags |= TSF_BOUND;
@@ -2472,7 +2480,7 @@
 	struct td_sched *ts;
 
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	if ((ts->ts_flags & TSF_BOUND) == 0)
 		return;
 	ts->ts_flags &= ~TSF_BOUND;
@@ -2485,7 +2493,7 @@
 sched_is_bound(struct thread *td)
 {
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	return (td->td_sched->ts_flags & TSF_BOUND);
+	return (TD_TO_TS(td)->ts_flags & TSF_BOUND);
 }
 
 /*
@@ -2532,6 +2540,16 @@
 }
 
 /*
+ * Early boot support.  Make thread0 a viable entity.
+ */
+void
+sched_init_thread0(void)
+{
+
+	thread0p = &sched0.initial_thread;
+}
+
+/*
  * The actual idle process.
  */
 void
@@ -2570,7 +2588,7 @@
 		spinlock_exit();
 	} else {
 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
-		tdq_load_rem(tdq, td->td_sched);
+		tdq_load_rem(tdq, TD_TO_TS(td));
 	}
 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
 	newtd = choosethread();
@@ -2597,7 +2615,7 @@
 	 */
 	cpuid = PCPU_GET(cpuid);
 	tdq = TDQ_CPU(cpuid);
-	ts = td->td_sched;
+	ts = TD_TO_TS(td);
 	if (TD_IS_IDLETHREAD(td))
 		td->td_lock = TDQ_LOCKPTR(tdq);
 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
@@ -2605,6 +2623,7 @@
 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
 }
 
+
 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
     "Scheduler");
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
@@ -2640,6 +2659,5 @@
 static int ccpu = 0;
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
 
-
 #define KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"

==== //depot/projects/bike_sched/sys/pc98/pc98/machdep.c#7 (text+ko) ====

@@ -1906,6 +1906,7 @@
 	int gsel_tss, metadata_missing, x;
 	struct pcpu *pc;
 
+	sched_init_thread0();
 	thread0.td_kstack = proc0kstack;
 	thread0.td_pcb = (struct pcb *)
 	   (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

==== //depot/projects/bike_sched/sys/powerpc/powerpc/machdep.c#5 (text+ko) ====

@@ -292,6 +292,7 @@
 	/*
 	 * Start initializing proc0 and thread0.
 	 */
+	sched_init_thread0();
 	proc_linkup0(&proc0, &thread0);
 	thread0.td_frame = &frame0;
 

==== //depot/projects/bike_sched/sys/sparc64/sparc64/machdep.c#5 (text+ko) ====

@@ -399,6 +399,7 @@
 	/*
 	 * Initialize proc0 stuff (p_contested needs to be done early).
 	 */
+	sched_init_thread0();
 	proc_linkup0(&proc0, &thread0);
 	proc0.p_md.md_sigtramp = NULL;
 	proc0.p_md.md_utrap = NULL;

==== //depot/projects/bike_sched/sys/sun4v/sun4v/machdep.c#2 (text+ko) ====

@@ -363,7 +363,7 @@
 	/*
 	 * Initialize proc0 stuff (p_contested needs to be done early).
 	 */
-
+	sched_init_thread0();
 	proc_linkup0(&proc0, &thread0);
 	proc0.p_md.md_sigtramp = NULL;
 	proc0.p_md.md_utrap = NULL;

==== //depot/projects/bike_sched/sys/sys/proc.h#8 (text+ko) ====

@@ -732,7 +732,8 @@
 extern struct sx proctree_lock;
 extern struct mtx ppeers_lock;
 extern struct proc proc0;		/* Process slot for swapper. */
-extern struct thread thread0;		/* Primary thread in proc0. */
+extern struct thread *thread0p;		/* Primary thread in proc0. */
+#define thread0 (*thread0p)		/* API Compatability */
 extern struct vmspace vmspace0;		/* VM space for proc0. */
 extern int hogticks;			/* Limit on kernel cpu hogs. */
 extern int lastpid;

==== //depot/projects/bike_sched/sys/sys/sched.h#7 (text+ko) ====

@@ -169,10 +169,12 @@
 #define SCHED_STAT_INC(var)
 #endif
 
-/* temporarily here */
+/* (not so) temporarily here */
 void schedinit(void);
 void sched_newproc(struct proc *p, struct thread *td);
 void sched_newthread(struct thread *td);
+void sched_init_thread0(void);
+
 #endif /* _KERNEL */
 
 /* POSIX 1003.1b Process Scheduling */


