Date:      Mon, 30 Mar 2009 19:20:56 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-7@freebsd.org
Subject:   svn commit: r190570 - stable/7/sys/kern
Message-ID:  <200903301920.n2UJKuuB075965@svn.freebsd.org>

Author: jeff
Date: Mon Mar 30 19:20:56 2009
New Revision: 190570
URL: http://svn.freebsd.org/changeset/base/190570

Log:
  MFC SVN rev 189787.
   - Fix steal_thresh calculation with odd numbers of cpus and sched_affinity()
     for threads on runqueues.
  Approved by:	re

Modified:
  stable/7/sys/kern/sched_ule.c

Modified: stable/7/sys/kern/sched_ule.c
==============================================================================
--- stable/7/sys/kern/sched_ule.c	Mon Mar 30 18:47:13 2009	(r190569)
+++ stable/7/sys/kern/sched_ule.c	Mon Mar 30 19:20:56 2009	(r190570)
@@ -1395,11 +1395,11 @@ sched_initticks(void *dummy)
 	 */
 	balance_interval = realstathz;
 	/*
-	 * Set steal thresh to log2(mp_ncpu) but no greater than 4.  This
-	 * prevents excess thrashing on large machines and excess idle on
-	 * smaller machines.
+	 * Set steal thresh to roughly log2(mp_ncpu) but no greater than 4. 
+	 * This prevents excess thrashing on large machines and excess idle 
+	 * on smaller machines.
 	 */
-	steal_thresh = min(ffs(mp_ncpus) - 1, 4);
+	steal_thresh = min(fls(mp_ncpus) - 1, 3);
 	affinity = SCHED_AFFINITY_DEFAULT;
 #endif
 }
@@ -2549,6 +2549,11 @@ sched_affinity(struct thread *td)
 	ts = td->td_sched;
 	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
 		return;
+	if (TD_ON_RUNQ(td)) {
+		sched_rem(td);
+		sched_add(td, SRQ_BORING);
+		return;
+	}
 	if (!TD_IS_RUNNING(td))
 		return;
 	td->td_flags |= TDF_NEEDRESCHED;
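
The first hunk above swaps ffs() for fls() in the steal_thresh calculation.
ffs() returns the 1-based index of the lowest set bit, which only matches
log2(mp_ncpus) + 1 when mp_ncpus is a power of two; fls() returns the index
of the highest set bit, so fls(mp_ncpus) - 1 is floor(log2(mp_ncpus)) for
every CPU count.  The cap also drops from 4 to 3.  A minimal standalone
sketch, not part of the commit, with my_ffs()/my_fls() standing in for the
kernel's libkern ffs()/fls(), prints the two formulas side by side:

#include <stdio.h>

#define	min(a, b)	((a) < (b) ? (a) : (b))

/* 1-based index of the lowest set bit, 0 if no bit is set (like ffs()). */
static int
my_ffs(int x)
{
	int bit;

	if (x == 0)
		return (0);
	for (bit = 1; (x & 1) == 0; bit++)
		x >>= 1;
	return (bit);
}

/* 1-based index of the highest set bit, 0 if no bit is set (like fls()). */
static int
my_fls(int x)
{
	int bit;

	for (bit = 0; x != 0; bit++)
		x >>= 1;
	return (bit);
}

int
main(void)
{
	int ncpus;

	for (ncpus = 1; ncpus <= 16; ncpus++)
		printf("mp_ncpus=%2d  old steal_thresh=%d  new steal_thresh=%d\n",
		    ncpus,
		    min(my_ffs(ncpus) - 1, 4),	/* r190569 and earlier */
		    min(my_fls(ncpus) - 1, 3));	/* this revision */
	return (0);
}

For mp_ncpus = 3 this prints old=0 new=1, and for mp_ncpus = 6 it prints
old=1 new=2: the steal_thresh miscalculation for odd (non-power-of-two)
CPU counts that the log message refers to.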

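The second hunk covers the case sched_affinity() previously missed: a thread
that is sitting on a run queue (TD_ON_RUNQ) rather than currently running.
Removing it with sched_rem() and re-adding it with sched_add() lets the
scheduler pick a CPU permitted by the new mask right away, instead of leaving
the thread queued where it can no longer run; only running threads need the
TDF_NEEDRESCHED path.  As a hedged userland illustration, not part of the
commit, changing a mask with cpuset_setaffinity(2) is the kind of operation
that ends up in this code path:

#include <sys/param.h>
#include <sys/cpuset.h>
#include <err.h>

int
main(void)
{
	cpuset_t mask;

	/* Restrict the current process (id -1) to CPU 0. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask) != 0)
		err(1, "cpuset_setaffinity");
	return (0);
}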

