Date:      Sat, 25 Jun 2011 10:22:19 +0000
From:      rudot@FreeBSD.org
To:        svn-soc-all@FreeBSD.org
Subject:   socsvn commit: r223695 - soc2011/rudot/kern
Message-ID:  <20110625102219.3F4A1106564A@hub.freebsd.org>

Author: rudot
Date: Sat Jun 25 10:22:18 2011
New Revision: 223695
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=223695

Log:
  The time slice is now remembered from the previous run instead of being
  reset on every switch.  Idle threads are also chosen according to the
  virtual deadline.
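
  As a rough illustration of the accounting this revision introduces in
  the sched_clock() hunk below, here is a minimal standalone C model.
  Only the names sched_slice, prio_ratios, ts_slice and ts_vdeadline
  mirror the patch; model_clock(), main() and the assumed ~128 Hz
  statclock are illustrative, not kernel code.

	/*
	 * Standalone model of the per-tick slice/deadline accounting
	 * added in sched_clock() below.
	 */
	#include <stdio.h>

	#define PRIO_MIN	(-20)
	#define PRIO_MAX	20

	static int prio_ratios[PRIO_MAX - PRIO_MIN + 1];
	static int sched_slice = 12;	/* ~stathz/10 at 128 Hz */

	struct td_sched {
		int ts_slice;		/* remaining slice, in ticks */
		int ts_vdeadline;	/* virtual deadline, in ticks */
	};

	/* One statclock tick for the running thread. */
	static int
	model_clock(struct td_sched *ts, int nice, int ticks)
	{
		if (--ts->ts_slice > 0)
			return (0);	/* slice not yet used up */
		/* Slice exhausted: push the deadline out, weighted by nice. */
		ts->ts_vdeadline = ticks + sched_slice *
		    prio_ratios[nice - PRIO_MIN] / 128;
		ts->ts_slice = sched_slice;
		return (1);		/* caller sets TDF_NEEDRESCHED */
	}

	int
	main(void)
	{
		struct td_sched ts = { .ts_slice = sched_slice };
		int i, ticks;

		prio_ratios[0] = 128;	/* same table as sched_setup() */
		for (i = 1; i <= PRIO_MAX - PRIO_MIN; ++i)
			prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;

		for (ticks = 1; ticks <= 3 * sched_slice; ticks++)
			if (model_clock(&ts, 0, ticks))
				printf("tick %d: resched, vdeadline %d\n",
				    ticks, ts.ts_vdeadline);
		return (0);
	}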

Modified:
  soc2011/rudot/kern/sched_fbfs.c

Modified: soc2011/rudot/kern/sched_fbfs.c
==============================================================================
--- soc2011/rudot/kern/sched_fbfs.c	Sat Jun 25 03:43:58 2011	(r223694)
+++ soc2011/rudot/kern/sched_fbfs.c	Sat Jun 25 10:22:18 2011	(r223695)
@@ -68,22 +68,11 @@
 dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
 #endif
 
-/*
- * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
- * the range 100-256 Hz (approximately).
- */
-#define	ESTCPULIM(e) \
-    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
-    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
-#ifdef SMP
-#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
-#else
-#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
-#endif
-#define	NICE_WEIGHT		1	/* Priorities per nice level. */
-
 #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
 
+static int realstathz;
+static int sched_slice = 1;
+
 /*
  * The schedulable entity that runs a context.
  * This is  an extension to the thread structure and is tailored to
@@ -95,6 +84,7 @@
 	int		ts_slptime;	/* (j) Seconds !RUNNING. */
 	int		ts_flags;
 	int		ts_vdeadline;	/* virtual deadline. */
+	int		ts_slice;	/* Remaining slice in number of ticks */
 	struct runq	*ts_runq;	/* runq the thread is currently on */
 #ifdef KTR
 	char		ts_name[TS_NAME_LEN];
@@ -118,17 +108,17 @@
 struct mtx sched_lock;
 
 static int	sched_tdcnt;	/* Total runnable threads in the system. */
-static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
-#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
 
 static void	setup_runqs(void);
 static void	sched_priority(struct thread *td, u_char prio);
 static void	sched_setup(void *dummy);
+static void	sched_initticks(void *dummy);
 
 static struct	thread *edf_choose(struct rqhead * rqh);
 static struct	thread *runq_choose_bfs(struct runq * rq);
 
 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
+SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL);
 
 /*
  * Global run queue.
@@ -146,80 +136,14 @@
 	runq_init(&runq);
 }
 
-static int
-sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
-{
-	int error, new_val;
-
-	new_val = sched_quantum * tick;
-	error = sysctl_handle_int(oidp, &new_val, 0, req);
-        if (error != 0 || req->newptr == NULL)
-		return (error);
-	if (new_val < tick)
-		return (EINVAL);
-	sched_quantum = new_val / tick;
-	hogticks = 2 * sched_quantum;
-	return (0);
-}
-
 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
 
 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
     "Scheduler name");
 
-SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
-    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
-    "Roundrobin scheduling quantum in microseconds");
-
-#ifdef SMP
-/* Enable forwarding of wakeups to all other cpus */
-SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
-
-static int runq_fuzz = 1;
-SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
-
-static int forward_wakeup_enabled = 1;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
-	   &forward_wakeup_enabled, 0,
-	   "Forwarding of wakeup to idle CPUs");
-
-static int forward_wakeups_requested = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
-	   &forward_wakeups_requested, 0,
-	   "Requests for Forwarding of wakeup to idle CPUs");
-
-static int forward_wakeups_delivered = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
-	   &forward_wakeups_delivered, 0,
-	   "Completed Forwarding of wakeup to idle CPUs");
-
-static int forward_wakeup_use_mask = 1;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
-	   &forward_wakeup_use_mask, 0,
-	   "Use the mask of idle cpus");
-
-static int forward_wakeup_use_loop = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
-	   &forward_wakeup_use_loop, 0,
-	   "Use a loop to find idle cpus");
-
-static int forward_wakeup_use_single = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
-	   &forward_wakeup_use_single, 0,
-	   "Only signal one idle cpu");
-
-static int forward_wakeup_use_htt = 0;
-SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
-	   &forward_wakeup_use_htt, 0,
-	   "account for htt");
+SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
+	"Slice size for timeshare threads");
 
-#endif
-#if 0
-static int sched_followon = 0;
-SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
-	   &sched_followon, 0,
-	   "allow threads to share a quantum");
-#endif
 
 static __inline void
 sched_load_add(void)
@@ -247,13 +171,11 @@
 int
 maybe_preempt(struct thread *td)
 {
-#ifdef PREEMPTION
-#endif
 	return (0);
 }
 
 /* I keep it here because the top command wants it. */
-static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
+static fixpt_t  ccpu = 0;
 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
 
 /* ARGSUSED */
@@ -262,6 +184,9 @@
 {
 	int i;
 
+	realstathz = hz;
+	sched_slice = (realstathz/10);
+
 	prio_ratios[0] = 128;
 	for (i = 1; i <= PRIO_MAX - PRIO_MIN; ++i) {
 		prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
@@ -269,14 +194,17 @@
 
 	setup_runqs();
 
-	if (sched_quantum == 0)
-		sched_quantum = SCHED_QUANTUM;
-	hogticks = 2 * sched_quantum;
-
 	/* Account for thread0. */
 	sched_load_add();
 }
 
+static void
+sched_initticks(void *dummy)
+{
+	realstathz = stathz ? stathz : hz;
+	sched_slice = (realstathz/10);  /* ~100ms */
+}
+
 /* External interfaces start here */
 
 /*
@@ -295,6 +223,7 @@
 	thread0.td_sched = &td_sched0;
 	thread0.td_lock = &sched_lock;
 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
+	td_sched0.ts_slice = sched_slice;
 }
 
 int
@@ -306,25 +235,9 @@
 int
 sched_rr_interval(void)
 {
-	if (sched_quantum == 0)
-		sched_quantum = SCHED_QUANTUM;
-	return (sched_quantum);
+	return (hz/(realstathz/sched_slice));
 }
 
-/*
- * We adjust the priority of the current process.  The priority of
- * a process gets worse as it accumulates CPU time.  The cpu usage
- * estimator (td_estcpu) is increased here.  resetpriority() will
- * compute a different priority each time td_estcpu increases by
- * INVERSE_ESTCPU_WEIGHT
- * (until MAXPRI is reached).  The cpu usage estimator ramps up
- * quite quickly when the process is running (linearly), and decays
- * away exponentially, at a rate which is proportionally slower when
- * the system is busy.  The basic principle is that the system will
- * 90% forget that the process used a lot of CPU time in 5 * loadav
- * seconds.  This causes the system to favor processes which haven't
- * run much recently, and to round-robin among other processes.
- */
 void
 sched_clock(struct thread *td)
 {
@@ -334,26 +247,23 @@
 	ts = td->td_sched;
 
 	ts->ts_cpticks++;
-	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
 
-	/*
-	 * Force a context switch if the current thread has used up a full
-	 * quantum (default quantum is 100ms).
-	 */
-	if (!TD_IS_IDLETHREAD(td) &&
-	    ticks - PCPU_GET(switchticks) >= sched_quantum) {
-		td->td_flags |= TDF_NEEDRESCHED;
-		ts->ts_vdeadline = ticks + sched_quantum *
-		    prio_ratios[td->td_proc->p_nice - PRIO_MIN] / 128;
-		
-		CTR4(KTR_SCHED, "timeslice fill: t: %d, i: %d, r: %d, d: %d",
-		    ticks, td->td_proc->p_nice - PRIO_MIN,
-		    prio_ratios[td->td_proc->p_nice - PRIO_MIN],
-		    ts->ts_vdeadline
-		);
+	if (--ts->ts_slice > 0)
+		return;
+
+	ts->ts_vdeadline = ticks + sched_slice *
+	    prio_ratios[td->td_proc->p_nice - PRIO_MIN] / 128;
+	ts->ts_slice = sched_slice;
+	td->td_flags |= TDF_NEEDRESCHED;
+	
+	CTR4(KTR_SCHED, "timeslice fill: t: %d, i: %d, r: %d, d: %d",
+	    ticks, td->td_proc->p_nice - PRIO_MIN,
+	    prio_ratios[td->td_proc->p_nice - PRIO_MIN],
+	    ts->ts_vdeadline
+	);
 		
-		CTR1(KTR_SCHED, "queue number: %d", td->td_rqindex);
-	}
+	CTR1(KTR_SCHED, "queue number: %d", td->td_rqindex);
+	CTR1(KTR_SCHED, "thread: 0x%x", td);
 }
 
 /*
@@ -377,7 +287,6 @@
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
 	    "prio:%d", child->td_priority);
 	thread_lock(td);
-	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
 	thread_unlock(td);
 	mtx_lock_spin(&sched_lock);
 	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
@@ -396,12 +305,14 @@
 {
 	struct td_sched *ts;
 
-	childtd->td_estcpu = td->td_estcpu;
 	childtd->td_lock = &sched_lock;
 	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
 	ts = childtd->td_sched;
 	bzero(ts, sizeof(*ts));
+	td->td_sched->ts_slice /= 2;
 	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
+	ts->ts_vdeadline = td->td_sched->ts_vdeadline;
+	ts->ts_slice = td->td_sched->ts_slice;
 }
 
 void
@@ -789,7 +700,7 @@
 		if (rqb->rqb_bits[i] == 0)
 			continue;
 		pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
-		if (pri == RQ_TIMESHARE) {
+		if ((pri == RQ_TIMESHARE) || (pri == RQ_IDLE)) {
 			td = edf_choose(&rq->rq_queues[pri]);
 			return (td);
 		}
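
  Two notes on the hunks above.  sched_fork_thread() now halves the
  parent's remaining slice and gives the child copies of the slice and
  virtual deadline, presumably so that forking cannot be used to obtain
  extra CPU time; and runq_choose_bfs() now applies the deadline
  comparison to the idle queue as well as the timeshare queue.  Since
  edf_choose() itself is not part of this diff, the following is only a
  hedged userland model of the earliest-virtual-deadline pick it is
  presumed to perform; the queue and thread types here are illustrative:

	/*
	 * Userland model of an earliest-virtual-deadline selection over
	 * a run queue.  struct mthread/mqueue are made up for this
	 * sketch; only the selection policy is taken from the patch.
	 */
	#include <stdio.h>
	#include <sys/queue.h>

	struct mthread {
		int vdeadline;
		TAILQ_ENTRY(mthread) link;
	};
	TAILQ_HEAD(mqueue, mthread);

	static struct mthread *
	edf_pick(struct mqueue *q)
	{
		struct mthread *t, *best = NULL;

		TAILQ_FOREACH(t, q, link)
			if (best == NULL ||
			    t->vdeadline - best->vdeadline < 0)
				best = t;	/* earlier deadline wins */
		return (best);
	}

	int
	main(void)
	{
		struct mqueue q = TAILQ_HEAD_INITIALIZER(q);
		struct mthread a = { .vdeadline = 400 };
		struct mthread b = { .vdeadline = 250 };

		TAILQ_INSERT_TAIL(&q, &a, link);
		TAILQ_INSERT_TAIL(&q, &b, link);
		printf("picked vdeadline %d\n", edf_pick(&q)->vdeadline);
		return (0);
	}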


