Date:      Sun, 4 Nov 2012 18:28:54 +0000 (UTC)
From:      Alexander Motin <mav@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r242578 - in projects/calloutng/sys: kern sys
Message-ID:  <201211041828.qA4ISs3J045427@svn.freebsd.org>

Author: mav
Date: Sun Nov  4 18:28:54 2012
New Revision: 242578
URL: http://svn.freebsd.org/changeset/base/242578

Log:
  Improve coalescing of the hardclock/statclock/etc. events and callout
  events by specifying a time at which callout_process() may optionally
  be called, i.e. there are some events that could be handled at that
  time if the CPU is active, but that could also be handled later if the
  CPU is now sleeping.  This reduces the rate of interrupts on a
  completely idle CPU from 6-8Hz down to 4Hz, as it is in HEAD, where
  hardclock() directly calls callout_tick().
  
  Reduce the forced minimal interrupt rate on idle from 4Hz to 2Hz.

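In outline, each pending event now carries two times: a hard deadline, for
which the hardware timer must be programmed, and an earlier optional time,
after which the event may be handled opportunistically from whatever
interrupt happens to arrive first.  A minimal userland sketch of the policy
(the types and names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    struct deadline {
            int64_t hard;   /* latest time; must take an interrupt by then */
            int64_t opt;    /* earliest time; may be handled from then on */
    };

    /* An idle CPU programs its timer for the hard deadline only. */
    static int64_t
    next_wakeup(const struct deadline *d)
    {
            return (d->hard);
    }

    /*
     * Any interrupt taken on an active CPU checks whether the optional
     * time has passed; if so, the event is absorbed into that interrupt
     * and no separate timer interrupt is needed for it.
     */
    static bool
    may_handle(const struct deadline *d, int64_t now)
    {
            return (now >= d->opt);
    }

    int
    main(void)
    {
            struct deadline d = { .hard = 1000, .opt = 500 };

            /* Sleep until 1000, but an interrupt at 600 may handle it. */
            return (!(next_wakeup(&d) == 1000 && may_handle(&d, 600)));
    }
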
Modified:
  projects/calloutng/sys/kern/kern_clocksource.c
  projects/calloutng/sys/kern/kern_timeout.c
  projects/calloutng/sys/sys/callout.h

Modified: projects/calloutng/sys/kern/kern_clocksource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clocksource.c	Sun Nov  4 15:15:47 2012	(r242577)
+++ projects/calloutng/sys/kern/kern_clocksource.c	Sun Nov  4 18:28:54 2012	(r242578)
@@ -72,7 +72,8 @@ static int		round_freq(struct eventtimer
 static void		getnextcpuevent(struct bintime *event, int idle);
 static void		getnextevent(struct bintime *event);
 static int		handleevents(struct bintime *now, int fake);
-static void		cpu_new_callout(int cpu, struct bintime bt);
+static void		cpu_new_callout(int cpu, struct bintime bt,
+			    struct bintime bt_opt);
 
 static struct mtx	et_hw_mtx;
 
@@ -135,6 +136,7 @@ struct pcpu_state {
 	struct bintime	nextstat;	/* Next statclock() event. */
 	struct bintime	nextprof;	/* Next profclock() event. */
 	struct bintime	nextcall;	/* Next callout event. */
+	struct bintime  nextcallopt;	/* Next optional callout event. */
 #ifdef KDTRACE_HOOKS
 	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
 #endif
@@ -238,9 +240,10 @@ handleevents(struct bintime *now, int fa
 		}
 	} else
 		state->nextprof = state->nextstat;
-	if (bintime_cmp(now, &state->nextcall, >=) &&
-		(state->nextcall.sec != -1)) {
+	if (bintime_cmp(now, &state->nextcallopt, >=) &&
+		(state->nextcallopt.sec != -1)) {
 		state->nextcall.sec = -1;
+		state->nextcallopt.sec = -1;
 		callout_process(now);
 	}
 
@@ -283,7 +286,7 @@ getnextcpuevent(struct bintime *event, i
 	*event = state->nexthard;
 	if (idle || (!activetick && !profiling &&
 	    (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
-		hardfreq = idle ? 4 : (stathz / 2);
+		hardfreq = idle ? 2 : (stathz / 2);
 		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > hardfreq)
 			hardfreq = tc_min_ticktock_freq;
 		if (hz > hardfreq) {
@@ -637,6 +640,7 @@ cpu_initclocks_bsp(void)
 		state->nextcyc.sec = -1;
 #endif
 		state->nextcall.sec = -1;
+		state->nextcallopt.sec = -1;
 	}
 	callout_new_inserted = cpu_new_callout;
 	periodic = want_periodic;
@@ -863,7 +867,7 @@ clocksource_cyc_set(const struct bintime
 #endif
 
 static void
-cpu_new_callout(int cpu, struct bintime bt)
+cpu_new_callout(int cpu, struct bintime bt, struct bintime bt_opt)
 {
 	struct bintime now;
 	struct pcpu_state *state;
@@ -881,6 +885,7 @@ cpu_new_callout(int cpu, struct bintime 
 	 * with respect to race conditions between interrupts execution 
 	 * and scheduling. 
 	 */
+	state->nextcallopt = bt_opt;
 	if (state->nextcall.sec != -1 &&
 	    bintime_cmp(&bt, &state->nextcall, >=)) {
 		ET_HW_UNLOCK(state);

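On the clocksource side the new time lands in the per-CPU nextcallopt field
and is consulted only from handleevents(), i.e. once an interrupt is
already being taken; getnextevent() keeps programming the hardware from the
hard times alone, so the optional time never wakes a sleeping CPU by
itself.  A condensed standalone model of that split (field names mirror
struct pcpu_state, the logic is simplified):

    #include <stdint.h>

    struct pcpu_model {
            int64_t nexthard;       /* next hardclock() event */
            int64_t nextcall;       /* hard callout deadline, -1 if unset */
            int64_t nextcallopt;    /* optional callout time, -1 if unset */
    };

    /* Timer programming: hard times only. */
    static int64_t
    model_getnextevent(const struct pcpu_model *s)
    {
            int64_t next = s->nexthard;

            if (s->nextcall != -1 && s->nextcall < next)
                    next = s->nextcall;
            return (next);
    }

    /* Interrupt path: run callouts once the optional time has passed. */
    static int
    model_handleevents(struct pcpu_model *s, int64_t now)
    {
            if (s->nextcallopt != -1 && now >= s->nextcallopt) {
                    s->nextcall = s->nextcallopt = -1;
                    return (1);     /* would call callout_process() */
            }
            return (0);
    }

    int
    main(void)
    {
            struct pcpu_model s = { 100, 400, 300 };

            /* Timer aims at 100; an interrupt at t=350 runs callouts early. */
            return (!(model_getnextevent(&s) == 100 &&
                model_handleevents(&s, 350) == 1));
    }
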
Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c	Sun Nov  4 15:15:47 2012	(r242577)
+++ projects/calloutng/sys/kern/kern_timeout.c	Sun Nov  4 18:28:54 2012	(r242578)
@@ -182,7 +182,8 @@ struct callout_cpu cc_cpu;
 	(sizeof(time_t) == (sizeof(int64_t)) ? INT64_MAX : INT32_MAX)
 
 static int timeout_cpu;
-void (*callout_new_inserted)(int cpu, struct bintime bt) = NULL;
+void (*callout_new_inserted)(int cpu, struct bintime bt,
+    struct bintime bt_opt) = NULL;
 static struct callout *
 softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
     int *lockcalls, int *gcalls, int direct);
@@ -369,11 +370,14 @@ start_softclock(void *dummy)
 
 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
 
+#define	CC_HASH_SHIFT	10
+
 static inline int
 callout_hash(struct bintime *bt)
 {
 
-	return (int) ((bt->sec<<10)+(bt->frac>>54));
+	return (int) ((bt->sec << CC_HASH_SHIFT) +
+	    (bt->frac >> (64 - CC_HASH_SHIFT)));
 } 
 
 static inline int
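With CC_HASH_SHIFT = 10 each wheel bucket is 2^-10 s wide: the seconds
contribute sec << 10, and the top ten bits of the binary fraction select
the sub-second bucket.  The 'future' look-ahead below then becomes
(3 << CC_HASH_SHIFT) / 4 = 768 buckets, i.e. 0.75 s, independent of hz.
A standalone check of the bucket arithmetic (bintime modeled with plain
fields):

    #include <stdint.h>
    #include <stdio.h>

    #define CC_HASH_SHIFT   10

    struct bintime_model {
            int64_t         sec;    /* whole seconds */
            uint64_t        frac;   /* binary fraction of a second */
    };

    static int
    model_hash(const struct bintime_model *bt)
    {
            return ((int)((bt->sec << CC_HASH_SHIFT) +
                (bt->frac >> (64 - CC_HASH_SHIFT))));
    }

    int
    main(void)
    {
            /* 5.5 s: frac = 2^63 is exactly half a second. */
            struct bintime_model bt = { 5, (uint64_t)1 << 63 };

            /* 5 << 10 = 5120, plus bucket 512 within the second: 5632. */
            printf("%d\n", model_hash(&bt));
            return (0);
    }
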
@@ -386,7 +390,7 @@ get_bucket(struct bintime *bt)
 void
 callout_process(struct bintime *now)
 {
-	struct bintime max, min, next, tmp_max, tmp_min;
+	struct bintime max, min, next, next_opt, tmp_max, tmp_min;
 	struct callout *tmp;
 	struct callout_cpu *cc;
 	struct callout_tailq *sc;
@@ -443,7 +447,7 @@ callout_process(struct bintime *now)
 		first = (first + 1) & callwheelmask;
 	}
 	cc->cc_exec_next_dir = NULL;
-	future = (last + hz / 4) & callwheelmask;
+	future = (last + (3 << CC_HASH_SHIFT) / 4) & callwheelmask;
 	max.sec = min.sec = TIME_T_MAX;
 	max.frac = min.frac = UINT64_MAX;
 	/*
@@ -486,8 +490,9 @@ callout_process(struct bintime *now)
 		last = (last + 1) & callwheelmask;
 	}
 	if (max.sec == TIME_T_MAX) {
-		next = *now;
-		bintime_addx(&next, (uint64_t)1 << (64 - 2));
+		next = next_opt = *now;
+		bintime_addx(&next, (uint64_t)3 << (64 - 2));
+		bintime_addx(&next_opt, (uint64_t)3 << (64 - 3));
 	} else {
 		/*
 		 * Now that we found something to aggregate, schedule an
@@ -502,9 +507,10 @@ callout_process(struct bintime *now)
 			next.sec >>= 1;
 		} else 
 			next = max;
+		next_opt = min;
 	}
 	if (callout_new_inserted != NULL)
-		(*callout_new_inserted)(cpu, next);
+		(*callout_new_inserted)(cpu, next, next_opt);
 	cc->cc_firstevent = next;
 	cc->cc_lastscan = *now;
 #ifdef CALLOUT_PROFILING
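For the aggregated case the pair handed to the event timer is: next, a
point inside the coalescing window [min, max] (the bintime code above
halves the sum of the bounds, with max as a fallback), and next_opt, which
is simply min, the earliest moment any of the scanned callouts may legally
run.  In plain integers the shape of the choice is roughly:

    #include <stdint.h>

    static void
    model_pick(int64_t min, int64_t max, int64_t *next, int64_t *next_opt)
    {
            /*
             * Halved sum = midpoint of the window, as with the bintime
             * shift above; the hard time lands where all scanned callouts
             * can still be batched into a single interrupt.
             */
            *next = (min + max) / 2;
            /* Processing may begin as soon as the earliest one is due. */
            *next_opt = min;
    }
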
@@ -607,7 +613,9 @@ callout_cc_add(struct callout *c, struct
 	    (bintime_cmp(&bt, &cc->cc_firstevent, <) ||
 	    !bintime_isset(&cc->cc_firstevent))) {
 		cc->cc_firstevent = c->c_time;
-		(*callout_new_inserted)(cpu, c->c_time);
+		bt = c->c_time;
+		bintime_sub(&bt, &c->c_precision);
+		(*callout_new_inserted)(cpu, c->c_time, bt);
 	}
 }
 

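In callout_cc_add() the optional time for a single newly inserted callout
is its scheduled time minus its precision: the callout is allowed to fire
anywhere in [c_time - c_precision, c_time].  The subtraction borrows across
the seconds boundary exactly as bintime_sub() in sys/time.h does; a
standalone version with a worked example:

    #include <stdint.h>
    #include <stdio.h>

    struct bintime_model {
            int64_t         sec;
            uint64_t        frac;
    };

    /* Same borrow rule as bintime_sub(). */
    static void
    model_sub(struct bintime_model *bt, const struct bintime_model *bt2)
    {
            uint64_t u = bt->frac;

            bt->frac -= bt2->frac;
            if (u < bt->frac)       /* fraction underflowed: borrow a second */
                    bt->sec--;
            bt->sec -= bt2->sec;
    }

    int
    main(void)
    {
            struct bintime_model when = { 10, 0 };  /* fire at t = 10.0 s */
            struct bintime_model prec = { 0, (uint64_t)1 << 62 };   /* 0.25 s */

            model_sub(&when, &prec);
            /* Prints "9 0.7500": may run any time from t = 9.75 s on. */
            printf("%lld %.4f\n", (long long)when.sec,
                (double)when.frac / 18446744073709551616.0);
            return (0);
    }
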
Modified: projects/calloutng/sys/sys/callout.h
==============================================================================
--- projects/calloutng/sys/sys/callout.h	Sun Nov  4 15:15:47 2012	(r242577)
+++ projects/calloutng/sys/sys/callout.h	Sun Nov  4 18:28:54 2012	(r242578)
@@ -114,7 +114,8 @@ int	callout_schedule_on(struct callout *
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
 int	_callout_stop_safe(struct callout *, int);
 void	callout_process(struct bintime *);
-extern void (*callout_new_inserted)(int cpu, struct bintime bt);
+extern void (*callout_new_inserted)(int cpu, struct bintime bt,
+    struct bintime);
 
 #endif
 


