Date:      Wed, 25 May 2011 08:28:22 +0000
From:      rudot@FreeBSD.org
To:        svn-soc-all@FreeBSD.org
Subject:   socsvn commit: r222364 - soc2011/rudot/kern
Message-ID:  <20110525082822.1B89A1065670@hub.freebsd.org>

Author: rudot
Date: Wed May 25 08:28:21 2011
New Revision: 222364
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=222364

Log:
  Single run-queue per system (even if it has multiple CPUs)
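
  For reference, with the per-CPU queues gone the scheduler entry points
  collapse to their former uniprocessor form and operate only on the single
  global runq.  A minimal sketch of how the surviving paths read after this
  commit (reconstructed from the context lines of the diff below; the
  comments are editorial and not part of the committed file):

  	static void
  	setup_runqs(void)
  	{
  		/* One system-wide run queue; runq_pcpu[] no longer exists. */
  		runq_init(&runq);
  	}

  	int
  	sched_runnable(void)
  	{
  		/* A runnable thread can only be on the global queue now. */
  		return runq_check(&runq);
  	}

  sched_choose() likewise always selects from the global queue via
  runq_choose(&runq), and sched_add()/sched_rem() no longer maintain the
  per-CPU runq_length[] counters.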

Modified:
  soc2011/rudot/kern/sched_4bsd.c

Modified: soc2011/rudot/kern/sched_4bsd.c
==============================================================================
--- soc2011/rudot/kern/sched_4bsd.c	Wed May 25 07:34:49 2011	(r222363)
+++ soc2011/rudot/kern/sched_4bsd.c	Wed May 25 08:28:21 2011	(r222364)
@@ -129,11 +129,6 @@
 static void	updatepri(struct thread *td);
 static void	resetpriority(struct thread *td);
 static void	resetpriority_thread(struct thread *td);
-#ifdef SMP
-static int	sched_pickcpu(struct thread *td);
-static int	forward_wakeup(int cpunum);
-static void	kick_other_cpu(int pri, int cpuid);
-#endif
 
 static struct kproc_desc sched_kp = {
         "schedcpu",
@@ -149,24 +144,9 @@
  */
 static struct runq runq;
 
-#ifdef SMP
-/*
- * Per-CPU run queues
- */
-static struct runq runq_pcpu[MAXCPU];
-long runq_length[MAXCPU];
-#endif
-
 static void
 setup_runqs(void)
 {
-#ifdef SMP
-	int i;
-
-	for (i = 0; i < MAXCPU; ++i)
-		runq_init(&runq_pcpu[i]);
-#endif
-
 	runq_init(&runq);
 }
 
@@ -652,11 +632,7 @@
 int
 sched_runnable(void)
 {
-#ifdef SMP
-	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
-#else
 	return runq_check(&runq);
-#endif
 }
 
 int
@@ -1060,247 +1036,8 @@
 	sched_add(td, SRQ_BORING);
 }
 
-#ifdef SMP
-static int
-forward_wakeup(int cpunum)
-{
-	struct pcpu *pc;
-	cpumask_t dontuse, id, map, map2, map3, me;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-
-	CTR0(KTR_RUNQ, "forward_wakeup()");
-
-	if ((!forward_wakeup_enabled) ||
-	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
-		return (0);
-	if (!smp_started || cold || panicstr)
-		return (0);
-
-	forward_wakeups_requested++;
-
-	/*
-	 * Check the idle mask we received against what we calculated
-	 * before in the old version.
-	 */
-	me = PCPU_GET(cpumask);
-
-	/* Don't bother if we should be doing it ourself. */
-	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
-		return (0);
-
-	dontuse = me | stopped_cpus | hlt_cpus_mask;
-	map3 = 0;
-	if (forward_wakeup_use_loop) {
-		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if ((id & dontuse) == 0 &&
-			    pc->pc_curthread == pc->pc_idlethread) {
-				map3 |= id;
-			}
-		}
-	}
-
-	if (forward_wakeup_use_mask) {
-		map = 0;
-		map = idle_cpus_mask & ~dontuse;
-
-		/* If they are both on, compare and use loop if different. */
-		if (forward_wakeup_use_loop) {
-			if (map != map3) {
-				printf("map (%02X) != map3 (%02X)\n", map,
-				    map3);
-				map = map3;
-			}
-		}
-	} else {
-		map = map3;
-	}
-
-	/* If we only allow a specific CPU, then mask off all the others. */
-	if (cpunum != NOCPU) {
-		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
-		map &= (1 << cpunum);
-	} else {
-		/* Try choose an idle die. */
-		if (forward_wakeup_use_htt) {
-			map2 =  (map & (map >> 1)) & 0x5555;
-			if (map2) {
-				map = map2;
-			}
-		}
-
-		/* Set only one bit. */
-		if (forward_wakeup_use_single) {
-			map = map & ((~map) + 1);
-		}
-	}
-	if (map) {
-		forward_wakeups_delivered++;
-		ipi_selected(map, IPI_AST);
-		return (1);
-	}
-	if (cpunum == NOCPU)
-		printf("forward_wakeup: Idle processor not found\n");
-	return (0);
-}
-
-static void
-kick_other_cpu(int pri, int cpuid)
-{
-	struct pcpu *pcpu;
-	int cpri;
-
-	pcpu = pcpu_find(cpuid);
-	if (idle_cpus_mask & pcpu->pc_cpumask) {
-		forward_wakeups_delivered++;
-		ipi_cpu(cpuid, IPI_AST);
-		return;
-	}
-
-	cpri = pcpu->pc_curthread->td_priority;
-	if (pri >= cpri)
-		return;
-
-#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
-#if !defined(FULL_PREEMPTION)
-	if (pri <= PRI_MAX_ITHD)
-#endif /* ! FULL_PREEMPTION */
-	{
-		ipi_cpu(cpuid, IPI_PREEMPT);
-		return;
-	}
-#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
-
-	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
-	ipi_cpu(cpuid, IPI_AST);
-	return;
-}
-#endif /* SMP */
-
-#ifdef SMP
-static int
-sched_pickcpu(struct thread *td)
-{
-	int best, cpu;
-
-	mtx_assert(&sched_lock, MA_OWNED);
-
-	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
-		best = td->td_lastcpu;
-	else
-		best = NOCPU;
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
-		if (!THREAD_CAN_SCHED(td, cpu))
-			continue;
-	
-		if (best == NOCPU)
-			best = cpu;
-		else if (runq_length[cpu] < runq_length[best])
-			best = cpu;
-	}
-	KASSERT(best != NOCPU, ("no valid CPUs"));
-
-	return (best);
-}
-#endif
-
 void
 sched_add(struct thread *td, int flags)
-#ifdef SMP
-{
-	struct td_sched *ts;
-	int forwarded = 0;
-	int cpu;
-	int single_cpu = 0;
-
-	ts = td->td_sched;
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	KASSERT((td->td_inhibitors == 0),
-	    ("sched_add: trying to run inhibited thread"));
-	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
-	    ("sched_add: bad thread state"));
-	KASSERT(td->td_flags & TDF_INMEM,
-	    ("sched_add: thread swapped out"));
-
-	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
-	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
-	    sched_tdname(curthread));
-	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
-	    KTR_ATTR_LINKED, sched_tdname(td));
-
-
-	/*
-	 * Now that the thread is moving to the run-queue, set the lock
-	 * to the scheduler's lock.
-	 */
-	if (td->td_lock != &sched_lock) {
-		mtx_lock_spin(&sched_lock);
-		thread_lock_set(td, &sched_lock);
-	}
-	TD_SET_RUNQ(td);
-
-	if (td->td_pinned != 0) {
-		cpu = td->td_lastcpu;
-		ts->ts_runq = &runq_pcpu[cpu];
-		single_cpu = 1;
-		CTR3(KTR_RUNQ,
-		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
-		    cpu);
-	} else if (td->td_flags & TDF_BOUND) {
-		/* Find CPU from bound runq. */
-		KASSERT(SKE_RUNQ_PCPU(ts),
-		    ("sched_add: bound td_sched not on cpu runq"));
-		cpu = ts->ts_runq - &runq_pcpu[0];
-		single_cpu = 1;
-		CTR3(KTR_RUNQ,
-		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
-		    cpu);
-	} else if (ts->ts_flags & TSF_AFFINITY) {
-		/* Find a valid CPU for our cpuset */
-		cpu = sched_pickcpu(td);
-		ts->ts_runq = &runq_pcpu[cpu];
-		single_cpu = 1;
-		CTR3(KTR_RUNQ,
-		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
-		    cpu);
-	} else {
-		CTR2(KTR_RUNQ,
-		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
-		    td);
-		cpu = NOCPU;
-		ts->ts_runq = &runq;
-	}
-
-	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
-	        kick_other_cpu(td->td_priority, cpu);
-	} else {
-		if (!single_cpu) {
-			cpumask_t me = PCPU_GET(cpumask);
-			cpumask_t idle = idle_cpus_mask & me;
-
-			if (!idle && ((flags & SRQ_INTR) == 0) &&
-			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
-				forwarded = forward_wakeup(cpu);
-		}
-
-		if (!forwarded) {
-			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
-				return;
-			else
-				maybe_resched(td);
-		}
-	}
-
-	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
-		sched_load_add();
-	runq_add(ts->ts_runq, td, flags);
-	if (cpu != NOCPU)
-		runq_length[cpu]++;
-}
-#else /* SMP */
 {
 	struct td_sched *ts;
 
@@ -1348,7 +1085,6 @@
 	runq_add(ts->ts_runq, td, flags);
 	maybe_resched(td);
 }
-#endif /* SMP */
 
 void
 sched_rem(struct thread *td)
@@ -1367,10 +1103,6 @@
 
 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
-#ifdef SMP
-	if (ts->ts_runq != &runq)
-		runq_length[ts->ts_runq - runq_pcpu]--;
-#endif
 	runq_remove(ts->ts_runq, td);
 	TD_SET_CAN_RUN(td);
 }
@@ -1386,34 +1118,11 @@
 	struct runq *rq;
 
 	mtx_assert(&sched_lock,  MA_OWNED);
-#ifdef SMP
-	struct thread *tdcpu;
 
 	rq = &runq;
-	td = runq_choose_fuzz(&runq, runq_fuzz);
-	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
-
-	if (td == NULL ||
-	    (tdcpu != NULL &&
-	     tdcpu->td_priority < td->td_priority)) {
-		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
-		     PCPU_GET(cpuid));
-		td = tdcpu;
-		rq = &runq_pcpu[PCPU_GET(cpuid)];
-	} else {
-		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
-	}
-
-#else
-	rq = &runq;
 	td = runq_choose(&runq);
-#endif
 
 	if (td) {
-#ifdef SMP
-		if (td == tdcpu)
-			runq_length[PCPU_GET(cpuid)]--;
-#endif
 		runq_remove(rq, td);
 		td->td_flags |= TDF_DIDRUN;
 
@@ -1469,7 +1178,6 @@
 
 	td->td_flags |= TDF_BOUND;
 #ifdef SMP
-	ts->ts_runq = &runq_pcpu[cpu];
 	if (PCPU_GET(cpuid) == cpu)
 		return;
 
@@ -1647,19 +1355,6 @@
 		return;
 
 	switch (td->td_state) {
-	case TDS_RUNQ:
-		/*
-		 * If we are on a per-CPU runqueue that is in the set,
-		 * then nothing needs to be done.
-		 */
-		if (ts->ts_runq != &runq &&
-		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
-			return;
-
-		/* Put this thread on a valid per-CPU runqueue. */
-		sched_rem(td);
-		sched_add(td, SRQ_BORING);
-		break;
 	case TDS_RUNNING:
 		/*
 		 * See if our current CPU is in the set.  If not, force a


