Date:      Wed, 30 May 2012 15:51:02 +0000 (UTC)
From:      Davide Italiano <davide@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r236315 - in projects/calloutng/sys: kern netinet sys
Message-ID:  <201205301551.q4UFp2Pi060531@svn.freebsd.org>

Author: davide
Date: Wed May 30 15:51:02 2012
New Revision: 236315
URL: http://svn.freebsd.org/changeset/base/236315

Log:
  Commit the work done so far:
  - Switch the way time is measured in callout(9) from ticks to bintime.
  - Change some fields in the callout, callout_cpu and cc_mig_ent structures.
  - callout_reset_on() now converts the to_ticks argument to a bintime
  value before passing it to callout_cc_add(), which has been modified to
  take a struct bintime rather than a value in ticks (a small conversion
  sketch follows the log message).
  - Add a new per-cpu list to handle pending callout requests rather than
  processing them directly on the callwheel. This simplifies the logic of
  the softclock() handler.
  - Refactor the callout_tick() and callout_tickstofirst() code to make them
  aware of the bintime measurement.
  - Adjust tcp_timer_to_xtimer() so that it survives the aforementioned
  switch.
  - Refactor the cpu_new_callout() function so that it updates the timer in
  one-shot fashion via the loadtimer() function. Until now the timer was
  reprogrammed on every tick.
  
  Discussed with:		mav

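The ticks-to-bintime conversion mentioned above works roughly as follows. The
userland sketch below is not part of this commit: it mirrors the FREQ2BT /
bintime_mul() / bintime_add() sequence used by callout_reset_on() in the diff,
but the struct bintime definition, the helper names (bt_from_freq, bt_mul,
bt_add), the hz and to_ticks values, and the CLOCK_MONOTONIC stand-in for
binuptime() are local assumptions so the example compiles outside the kernel.

/*
 * Userland sketch of the tick -> bintime conversion done by
 * callout_reset_on() in this commit:
 *     FREQ2BT(hz, &bt); bintime_mul(&bt, to_ticks); bintime_add(&bt, &now);
 * The helpers below are local stand-ins modeled on the sys/time.h
 * primitives; they are not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct bintime {
	time_t   sec;
	uint64_t frac;		/* 1 second == 2^64 fractional units */
};

/* Period of a freq-Hz clock, same idea as the FREQ2BT macro in the diff. */
static void
bt_from_freq(uint64_t freq, struct bintime *bt)
{
	bt->sec = 0;
	bt->frac = ((uint64_t)0x8000000000000000 / freq) << 1;
}

/* bt += bt2, carrying fraction overflow into the seconds field. */
static void
bt_add(struct bintime *bt, const struct bintime *bt2)
{
	uint64_t ofrac = bt->frac;

	bt->frac += bt2->frac;
	if (bt->frac < ofrac)
		bt->sec++;
	bt->sec += bt2->sec;
}

/* bt *= x, splitting the 64-bit fraction to avoid losing the carry. */
static void
bt_mul(struct bintime *bt, unsigned int x)
{
	uint64_t p1, p2;

	p1 = (bt->frac & 0xffffffffULL) * x;
	p2 = (bt->frac >> 32) * x + (p1 >> 32);
	bt->sec = bt->sec * x + (time_t)(p2 >> 32);
	bt->frac = (p2 << 32) | (p1 & 0xffffffffULL);
}

int
main(void)
{
	const unsigned int hz = 1000;		/* assumed tick rate */
	const unsigned int to_ticks = 250;	/* relative timeout in ticks */
	struct bintime bt, now;
	struct timespec ts;

	/* Rough userland substitute for the kernel's binuptime(&now). */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	now.sec = ts.tv_sec;
	now.frac = (uint64_t)ts.tv_nsec * 18446744073ULL;	/* ~2^64 / 1e9 */

	bt_from_freq(hz, &bt);		/* one tick as a bintime period */
	bt_mul(&bt, to_ticks);		/* relative timeout as a bintime */
	bt_add(&bt, &now);		/* absolute expiration time */

	printf("expires at %jd s + %ju/2^64\n",
	    (intmax_t)bt.sec, (uintmax_t)bt.frac);
	return (0);
}
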
Modified:
  projects/calloutng/sys/kern/kern_clock.c
  projects/calloutng/sys/kern/kern_clocksource.c
  projects/calloutng/sys/kern/kern_timeout.c
  projects/calloutng/sys/netinet/tcp_timer.c
  projects/calloutng/sys/sys/_callout.h
  projects/calloutng/sys/sys/callout.h

Modified: projects/calloutng/sys/kern/kern_clock.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clock.c	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/kern/kern_clock.c	Wed May 30 15:51:02 2012	(r236315)
@@ -549,7 +549,6 @@ hardclock_cnt(int cnt, int usermode)
 	if (td->td_intr_frame != NULL)
 		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
 #endif
-	callout_tick();
 	/* We are in charge to handle this tick duty. */
 	if (newticks > 0) {
 		/* Dangerous and no need to call these things concurrently. */

Modified: projects/calloutng/sys/kern/kern_clocksource.c
==============================================================================
--- projects/calloutng/sys/kern/kern_clocksource.c	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/kern/kern_clocksource.c	Wed May 30 15:51:02 2012	(r236315)
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
+#include <sys/time.h>
 #include <sys/timeet.h>
 #include <sys/timetc.h>
 
@@ -72,7 +73,7 @@ static void		getnextcpuevent(struct bint
 static void		getnextevent(struct bintime *event);
 static int		handleevents(struct bintime *now, int fake);
 #ifdef SMP
-static void		cpu_new_callout(int cpu, int ticks);
+static void		cpu_new_callout(int cpu, struct bintime bt);
 #endif
 
 static struct mtx	et_hw_mtx;
@@ -135,6 +136,7 @@ struct pcpu_state {
 	struct bintime	nexthard;	/* Next hardlock() event. */
 	struct bintime	nextstat;	/* Next statclock() event. */
 	struct bintime	nextprof;	/* Next profclock() event. */
+	struct bintime	nextcall;	/* Next callout event. */
 #ifdef KDTRACE_HOOKS
 	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
 #endif
@@ -236,6 +238,11 @@ handleevents(struct bintime *now, int fa
 		}
 	} else
 		state->nextprof = state->nextstat;
+	if (bintime_cmp(now, &state->nextcall, >=) &&
+		(state->nextcall.sec != -1)) {
+		state->nextcall.sec = -1;
+		callout_tick();
+	}
 
 #ifdef KDTRACE_HOOKS
 	if (fake == 0 && cyclic_clock_func != NULL &&
@@ -269,21 +276,21 @@ getnextcpuevent(struct bintime *event, i
 {
 	struct bintime tmp;
 	struct pcpu_state *state;
-	int skip;
 
 	state = DPCPU_PTR(timerstate);
 	/* Handle hardclock() events. */
 	*event = state->nexthard;
-	if (idle || (!activetick && !profiling &&
-	    (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
-		skip = idle ? 4 : (stathz / 2);
-		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
-			skip = tc_min_ticktock_freq;
-		skip = callout_tickstofirst(hz / skip) - 1;
-		CTR2(KTR_SPARE2, "skip   at %d: %d", curcpu, skip);
-		tmp = hardperiod;
-		bintime_mul(&tmp, skip);
-		bintime_add(event, &tmp);
+	/* Handle callout events. */
+	tmp = callout_tickstofirst();
+	if (state->nextcall.sec == -1)
+		state->nextcall = tmp;
+	if (bintime_cmp(&tmp, &state->nextcall, <) && 	
+	    (tmp.sec != -1)) {
+		state->nextcall = tmp;
+	}	
+	if (bintime_cmp(event, &state->nextcall, >) && 
+	    (state->nextcall.sec != -1)) {
+		*event = state->nextcall;
 	}
 	if (!idle) { /* If CPU is active - handle other types of events. */
 		if (bintime_cmp(event, &state->nextstat, >))
@@ -625,6 +632,7 @@ cpu_initclocks_bsp(void)
 #ifdef KDTRACE_HOOKS
 		state->nextcyc.sec = -1;
 #endif
+		state->nextcall.sec = -1;
 	}
 #ifdef SMP
 	callout_new_inserted = cpu_new_callout;
@@ -858,9 +866,9 @@ clocksource_cyc_set(const struct bintime
 
 #ifdef SMP
 static void
-cpu_new_callout(int cpu, int ticks)
+cpu_new_callout(int cpu, struct bintime bt)
 {
-	struct bintime tmp;
+	struct bintime now;
 	struct pcpu_state *state;
 
 	CTR3(KTR_SPARE2, "new co at %d:    on %d in %d",
@@ -876,17 +884,39 @@ cpu_new_callout(int cpu, int ticks)
 	 * If timer is global - there is chance it is already programmed.
 	 */
 	if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
-		tmp = hardperiod;
-		bintime_mul(&tmp, ticks - 1);
-		bintime_add(&tmp, &state->nexthard);
-		if (bintime_cmp(&tmp, &state->nextevent, <))
-			state->nextevent = tmp;
+		/* 
+		 * Update next callout time. We can do this only if 
+		 * this one on which we're running is the target CPU.
+		 */
+		if (!periodic) {
+			if (bintime_cmp(&bt, &state->nextcall, ==)) {
+				ET_HW_UNLOCK(state);
+				return;
+			}
+			if (state->nextcall.sec == -1 ||
+			    bintime_cmp(&bt, &state->nextcall, <))
+				state->nextcall = bt;
+			if (bintime_cmp(&state->nextcall, &state->nextevent, >=)) {
+				ET_HW_UNLOCK(state);
+				return;
+			}	
+			state->nextevent = state->nextcall;
+			if (cpu == curcpu) {
+				loadtimer(&now, 0);
+				ET_HW_UNLOCK(state);
+			}
+			else
+				goto out;
+		}
+		if (bintime_cmp(&state->nexthard, &state->nextevent, <))
+			state->nextevent = state->nexthard;
 		if (periodic ||
 		    bintime_cmp(&state->nextevent, &nexttick, >=)) {
 			ET_HW_UNLOCK(state);
 			return;
 		}
 	}
+out:
 	/*
 	 * Otherwise we have to wake that CPU up, as we can't get present
 	 * bintime to reprogram global timer from here. If timer is per-CPU,

Modified: projects/calloutng/sys/kern/kern_timeout.c
==============================================================================
--- projects/calloutng/sys/kern/kern_timeout.c	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/kern/kern_timeout.c	Wed May 30 15:51:02 2012	(r236315)
@@ -68,9 +68,6 @@ SDT_PROBE_DEFINE(callout_execute, kernel
 SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
     "struct callout *");
 
-static int avg_depth;
-SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
-    "Average number of items examined per softclock call. Units = 1/1000");
 static int avg_gcalls;
 SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
     "Average number of Giant callouts made per softclock call. Units = 1/1000");
@@ -94,10 +91,10 @@ int callwheelsize, callwheelbits, callwh
  */
 struct cc_mig_ent {
 #ifdef SMP
-	void	(*ce_migration_func)(void *);
-	void	*ce_migration_arg;
-	int	ce_migration_cpu;
-	int	ce_migration_ticks;
+	void			(*ce_migration_func)(void *);
+	void			*ce_migration_arg;
+	int			ce_migration_cpu;
+	struct bintime		ce_migration_time;
 #endif
 };
 	
@@ -127,18 +124,19 @@ struct callout_cpu {
 	struct callout		*cc_next;
 	struct callout		*cc_curr;
 	void			*cc_cookie;
-	int 			cc_ticks;
-	int 			cc_softticks;
+	struct bintime 		cc_ticks;
+	struct bintime 		cc_softticks;
 	int			cc_cancel;
 	int			cc_waiting;
-	int 			cc_firsttick;
+	struct bintime 		cc_firsttick;
+	struct callout_tailq	*cc_localexp;		  
 };
 
 #ifdef SMP
 #define	cc_migration_func	cc_migrating_entity.ce_migration_func
 #define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
 #define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
-#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks
+#define	cc_migration_time	cc_migrating_entity.ce_migration_time
 
 struct callout_cpu cc_cpu[MAXCPU];
 #define	CPUBLOCK	MAXCPU
@@ -153,8 +151,14 @@ struct callout_cpu cc_cpu;
 #define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
 #define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
 
+#define FREQ2BT(freq, bt)                                               \
+{                                                                       \
+        (bt)->sec = 0;                                                  \
+        (bt)->frac = ((uint64_t)0x8000000000000000  / (freq)) << 1;     \
+}
+
 static int timeout_cpu;
-void (*callout_new_inserted)(int cpu, int ticks) = NULL;
+void (*callout_new_inserted)(int cpu, struct bintime bt) = NULL;
 
 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
 
@@ -184,7 +188,8 @@ cc_cme_cleanup(struct callout_cpu *cc)
 
 #ifdef SMP
 	cc->cc_migration_cpu = CPUBLOCK;
-	cc->cc_migration_ticks = 0;
+	cc->cc_migration_time.sec = 0;
+	cc->cc_migration_time.frac = 0;
 	cc->cc_migration_func = NULL;
 	cc->cc_migration_arg = NULL;
 #endif
@@ -230,6 +235,8 @@ kern_timeout_callwheel_alloc(caddr_t v)
 	v = (caddr_t)(cc->cc_callout + ncallout);
 	cc->cc_callwheel = (struct callout_tailq *)v;
 	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
+	cc->cc_localexp = (struct callout_tailq *)v;
+	v = (caddr_t)(cc->cc_localexp + 1);
 	return(v);
 }
 
@@ -244,6 +251,7 @@ callout_cpu_init(struct callout_cpu *cc)
 	for (i = 0; i < callwheelsize; i++) {
 		TAILQ_INIT(&cc->cc_callwheel[i]);
 	}
+	TAILQ_INIT(cc->cc_localexp);
 	cc_cme_cleanup(cc);
 	if (cc->cc_callout == NULL)
 		return;
@@ -325,6 +333,8 @@ start_softclock(void *dummy)
 		cc->cc_callwheel = malloc(
 		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
 		    M_WAITOK);
+		cc->cc_localexp = malloc(
+		    sizeof(struct callout_tailq), M_CALLOUT, M_WAITOK);
 		callout_cpu_init(cc);
 	}
 #endif
@@ -332,10 +342,23 @@ start_softclock(void *dummy)
 
 SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
 
+static int
+get_bucket(struct bintime *bt) 
+{
+	time_t sec;
+	uint64_t frac;
+	sec = bt->sec;
+	frac = bt->frac;
+	return (int) (((sec<<10)+(frac>>54)) & callwheelmask);
+} 
+
 void
 callout_tick(void)
 {
+	struct callout *tmp;
 	struct callout_cpu *cc;
+	struct callout_tailq *sc;
+	struct bintime now;
 	int need_softclock;
 	int bucket;
 
@@ -346,48 +369,63 @@ callout_tick(void)
 	need_softclock = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	cc->cc_firsttick = cc->cc_ticks = ticks;
-	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
-		bucket = cc->cc_softticks & callwheelmask;
-		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
-			need_softclock = 1;
-			break;
+	binuptime(&now);
+	for (bucket = 0; bucket < callwheelsize; ++bucket) {
+		sc = &cc->cc_callwheel[bucket];
+		TAILQ_FOREACH(tmp, sc, c_links.tqe) {
+			if (bintime_cmp(&tmp->c_time,&now, <=)) {
+				TAILQ_INSERT_TAIL(cc->cc_localexp,tmp,c_staiter);
+				TAILQ_REMOVE(sc, tmp, c_links.tqe);
+				need_softclock = 1;
+			}	
 		}
 	}
+	cc->cc_softticks = now;
 	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
 	/*
 	 * swi_sched acquires the thread lock, so we don't want to call it
 	 * with cc_lock held; incorrect locking order.
 	 */
-	if (need_softclock)
+	if (need_softclock) {
 		swi_sched(cc->cc_cookie, 0);
+	}
 }
 
-int
-callout_tickstofirst(int limit)
+struct bintime
+callout_tickstofirst(void)
 {
 	struct callout_cpu *cc;
 	struct callout *c;
 	struct callout_tailq *sc;
-	int curticks;
-	int skip = 1;
+	struct bintime tmp;
+	struct bintime now;
+	int bucket;
 
+	tmp.sec = 0;
+	tmp.frac = 0;
 	cc = CC_SELF();
 	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	curticks = cc->cc_ticks;
-	while( skip < ncallout && skip < limit ) {
-		sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
-		/* search scanning ticks */
-		TAILQ_FOREACH( c, sc, c_links.tqe ){
-			if (c->c_time - curticks <= ncallout)
-				goto out;
+	binuptime(&now);
+	for (bucket = 0; bucket < callwheelsize; ++bucket) {
+		sc = &cc->cc_callwheel[bucket];
+		TAILQ_FOREACH( c, sc, c_links.tqe ) {
+			if (tmp.sec == 0 && tmp.frac == 0) 
+				tmp = c->c_time;
+			if (bintime_cmp(&c->c_time, &now, <)) 
+				tmp = now;
+			if (bintime_cmp(&c->c_time, &tmp, <=)) 
+				tmp = c->c_time;
+			
 		}
-		skip++;
 	}
-out:
-	cc->cc_firsttick = curticks + skip;
+	if (tmp.sec == 0 && tmp.frac == 0) {
+		cc->cc_firsttick.sec = -1;
+		cc->cc_firsttick.frac = -1;
+	}
+	else
+		cc->cc_firsttick = tmp;
 	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
-	return (skip);
+	return (cc->cc_firsttick);
 }
 
 static struct callout_cpu *
@@ -415,26 +453,35 @@ callout_lock(struct callout *c)
 }
 
 static void
-callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
-    void (*func)(void *), void *arg, int cpu)
+callout_cc_add(struct callout *c, struct callout_cpu *cc, 
+    struct bintime to_bintime, void (*func)(void *), void *arg, int cpu)
 {
-
+	int bucket;	
+	struct bintime now;
+	struct bintime tmp;
+	
+	tmp.sec = 1;
+	tmp.frac = 0;
 	CC_LOCK_ASSERT(cc);
-
-	if (to_ticks <= 0)
-		to_ticks = 1;
+	binuptime(&now);
+	if (bintime_cmp(&to_bintime, &now, <)) {
+		bintime_add(&now, &tmp);
+		to_bintime = now;
+	}
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
 	c->c_func = func;
-	c->c_time = ticks + to_ticks;
-	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 
+	c->c_time = to_bintime; 
+	bucket = get_bucket(&c->c_time);	
+	TAILQ_INSERT_TAIL(&cc->cc_callwheel[bucket & callwheelmask], 
 	    c, c_links.tqe);
-	if ((c->c_time - cc->cc_firsttick) < 0 &&
-	    callout_new_inserted != NULL) {
-		cc->cc_firsttick = c->c_time;
-		(*callout_new_inserted)(cpu,
-		    to_ticks + (ticks - cc->cc_ticks));
-	}
+	/*
+	 * Inform the eventtimers(4) subsystem there's a new callout 
+	 * that has been inserted.
+	 */
+	if (callout_new_inserted != NULL)
+	(*callout_new_inserted)(cpu,
+	    to_bintime);
 }
 
 static void
@@ -442,7 +489,7 @@ callout_cc_del(struct callout *c, struct
 {
 
 	if (cc->cc_next == c)
-		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+		cc->cc_next = TAILQ_NEXT(c, c_staiter);
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
 		c->c_func = NULL;
 		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
@@ -462,7 +509,8 @@ softclock_call_cc(struct callout *c, str
 	struct callout_cpu *new_cc;
 	void (*new_func)(void *);
 	void *new_arg;
-	int new_cpu, new_ticks;
+	int new_cpu;
+	struct bintime new_time;
 #endif
 #ifdef DIAGNOSTIC
 	struct bintime bt1, bt2;
@@ -471,7 +519,7 @@ softclock_call_cc(struct callout *c, str
 	static timeout_t *lastfunc;
 #endif
 
-	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+	cc->cc_next = TAILQ_NEXT(c, c_staiter);
 	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
 	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
 	c_lock = c->c_lock;
@@ -574,7 +622,7 @@ skip:
 		 * migration just perform it now.
 		 */
 		new_cpu = cc->cc_migration_cpu;
-		new_ticks = cc->cc_migration_ticks;
+		new_time = cc->cc_migration_time;
 		new_func = cc->cc_migration_func;
 		new_arg = cc->cc_migration_arg;
 		cc_cme_cleanup(cc);
@@ -598,7 +646,7 @@ skip:
 		 * is not easy.
 		 */
 		new_cc = callout_cpu_switch(c, cc, new_cpu);
-		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
+		callout_cc_add(c, new_cc, new_time, new_func, new_arg,
 		    new_cpu);
 		CC_UNLOCK(new_cc);
 		CC_LOCK(cc);
@@ -633,10 +681,7 @@ softclock(void *arg)
 {
 	struct callout_cpu *cc;
 	struct callout *c;
-	struct callout_tailq *bucket;
-	int curticks;
 	int steps;	/* #steps since we last allowed interrupts */
-	int depth;
 	int mpcalls;
 	int lockcalls;
 	int gcalls;
@@ -644,46 +689,34 @@ softclock(void *arg)
 #ifndef MAX_SOFTCLOCK_STEPS
 #define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
 #endif /* MAX_SOFTCLOCK_STEPS */
-
+	
 	mpcalls = 0;
 	lockcalls = 0;
 	gcalls = 0;
-	depth = 0;
 	steps = 0;
 	cc = (struct callout_cpu *)arg;
 	CC_LOCK(cc);
-	while (cc->cc_softticks - 1 != cc->cc_ticks) {
-		/*
-		 * cc_softticks may be modified by hard clock, so cache
-		 * it while we work on a given bucket.
-		 */
-		curticks = cc->cc_softticks;
-		cc->cc_softticks++;
-		bucket = &cc->cc_callwheel[curticks & callwheelmask];
-		c = TAILQ_FIRST(bucket);
-		while (c != NULL) {
-			depth++;
-			if (c->c_time != curticks) {
-				c = TAILQ_NEXT(c, c_links.tqe);
-				++steps;
-				if (steps >= MAX_SOFTCLOCK_STEPS) {
-					cc->cc_next = c;
-					/* Give interrupts a chance. */
-					CC_UNLOCK(cc);
-					;	/* nothing */
-					CC_LOCK(cc);
-					c = cc->cc_next;
-					steps = 0;
-				}
-			} else {
-				TAILQ_REMOVE(bucket, c, c_links.tqe);
-				c = softclock_call_cc(c, cc, &mpcalls,
-				    &lockcalls, &gcalls);
-				steps = 0;
-			}
+
+	c = TAILQ_FIRST(cc->cc_localexp);
+	while (c != NULL) {
+		++steps;
+		if (steps >= MAX_SOFTCLOCK_STEPS) {
+			cc->cc_next = c;
+			/* Give interrupts a chance. */
+			CC_UNLOCK(cc);
+			;	/* nothing */
+			CC_LOCK(cc);
+			c = cc->cc_next;
+			steps = 0;
 		}
+		else {
+			TAILQ_REMOVE(cc->cc_localexp, c, c_staiter);	
+			c = softclock_call_cc(c, cc, &mpcalls,
+			    &lockcalls, &gcalls);
+			steps = 0;
+		}	
 	}
-	avg_depth += (depth * 1000 - avg_depth) >> 8;
+
 	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
 	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
 	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
@@ -781,8 +814,19 @@ callout_reset_on(struct callout *c, int 
     void *arg, int cpu)
 {
 	struct callout_cpu *cc;
+	struct bintime bt;
+	struct bintime now;
 	int cancelled = 0;
+	int bucket; 
+	
+	/*
+	 * Convert ticks to struct bintime.
+	 */
 
+	FREQ2BT(hz,&bt);
+	binuptime(&now);
+	bintime_mul(&bt,to_ticks);
+	bintime_add(&bt,&now);
 	/*
 	 * Don't allow migration of pre-allocated callouts lest they
 	 * become unbalanced.
@@ -814,7 +858,8 @@ callout_reset_on(struct callout *c, int 
 		if (cc->cc_next == c) {
 			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
 		}
-		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+		bucket = get_bucket(&c->c_time);	
+		TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 		    c_links.tqe);
 
 		cancelled = 1;
@@ -830,13 +875,13 @@ callout_reset_on(struct callout *c, int 
 	if (c->c_cpu != cpu) {
 		if (cc->cc_curr == c) {
 			cc->cc_migration_cpu = cpu;
-			cc->cc_migration_ticks = to_ticks;
+			cc->cc_migration_time = bt;
 			cc->cc_migration_func = ftn;
 			cc->cc_migration_arg = arg;
 			c->c_flags |= CALLOUT_DFRMIGRATION;
-			CTR5(KTR_CALLOUT,
-		    "migration of %p func %p arg %p in %d to %u deferred",
-			    c, c->c_func, c->c_arg, to_ticks, cpu);
+			CTR6(KTR_CALLOUT,
+		    "migration of %p func %p arg %p in %ld %ld to %u deferred",
+			    c, c->c_func, c->c_arg, bt.sec, bt.frac, cpu);
 			CC_UNLOCK(cc);
 			return (cancelled);
 		}
@@ -844,9 +889,9 @@ callout_reset_on(struct callout *c, int 
 	}
 #endif
 
-	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
-	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
-	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
+	callout_cc_add(c, cc, bt, ftn, arg, cpu);
+	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %ld %ld",
+	    cancelled ? "re" : "", c, c->c_func, c->c_arg, bt.sec, bt.frac);
 	CC_UNLOCK(cc);
 
 	return (cancelled);
@@ -874,7 +919,7 @@ _callout_stop_safe(c, safe)
 {
 	struct callout_cpu *cc, *old_cc;
 	struct lock_class *class;
-	int use_lock, sq_locked;
+	int use_lock, sq_locked, bucket;
 
 	/*
 	 * Some old subsystems don't hold Giant while running a callout_stop(),
@@ -1024,7 +1069,8 @@ again:
 
 	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
 	    c, c->c_func, c->c_arg);
-	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+	bucket = get_bucket(&c->c_time);
+	TAILQ_REMOVE(&cc->cc_callwheel[bucket], c,
 	    c_links.tqe);
 	callout_cc_del(c, cc);
 

Modified: projects/calloutng/sys/netinet/tcp_timer.c
==============================================================================
--- projects/calloutng/sys/netinet/tcp_timer.c	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/netinet/tcp_timer.c	Wed May 30 15:51:02 2012	(r236315)
@@ -662,21 +662,54 @@ tcp_timer_active(struct tcpcb *tp, int t
 
 #define	ticks_to_msecs(t)	(1000*(t) / hz)
 
+#define bintime_to_msecs(bt)						\
+	(((uint64_t)1000 *						\
+	(uint32_t)  (bt.frac >> 32)) >> 32) + (bt.sec * 1000); 
+
 void
 tcp_timer_to_xtimer(struct tcpcb *tp, struct tcp_timer *timer, struct xtcp_timer *xtimer)
 {
+	struct bintime now;
+	struct bintime tmp;
+	
 	bzero(xtimer, sizeof(struct xtcp_timer));
 	if (timer == NULL)
 		return;
-	if (callout_active(&timer->tt_delack))
-		xtimer->tt_delack = ticks_to_msecs(timer->tt_delack.c_time - ticks);
-	if (callout_active(&timer->tt_rexmt))
-		xtimer->tt_rexmt = ticks_to_msecs(timer->tt_rexmt.c_time - ticks);
-	if (callout_active(&timer->tt_persist))
-		xtimer->tt_persist = ticks_to_msecs(timer->tt_persist.c_time - ticks);
-	if (callout_active(&timer->tt_keep))
-		xtimer->tt_keep = ticks_to_msecs(timer->tt_keep.c_time - ticks);
-	if (callout_active(&timer->tt_2msl))
-		xtimer->tt_2msl = ticks_to_msecs(timer->tt_2msl.c_time - ticks);
+	
+	if (callout_active(&timer->tt_delack)) {
+		binuptime(&now);
+		tmp = timer->tt_delack.c_time;		
+		bintime_sub(&tmp,&now);
+		xtimer->tt_delack = bintime_to_msecs(tmp);
+	}
+	
+	if (callout_active(&timer->tt_rexmt)) {
+		binuptime(&now);
+		tmp = timer->tt_rexmt.c_time;
+		bintime_sub(&tmp,&now);
+		xtimer->tt_rexmt = bintime_to_msecs(tmp);
+	}
+	
+	if (callout_active(&timer->tt_persist)) {
+		binuptime(&now);
+		tmp = timer->tt_persist.c_time;
+		bintime_sub(&tmp,&now);
+		xtimer->tt_persist = bintime_to_msecs(tmp);
+	}
+	
+	if (callout_active(&timer->tt_keep)) {
+		binuptime(&now);
+		tmp = timer->tt_keep.c_time;
+		bintime_sub(&tmp,&now); 
+		xtimer->tt_keep = bintime_to_msecs(tmp);
+	}
+
+	if (callout_active(&timer->tt_2msl)) {
+		binuptime(&now);
+		tmp = timer->tt_2msl.c_time;
+		bintime_sub(&tmp,&now);
+		xtimer->tt_2msl = bintime_to_msecs(tmp);
+	}
+
 	xtimer->t_rcvtime = ticks_to_msecs(ticks - tp->t_rcvtime);
 }

Modified: projects/calloutng/sys/sys/_callout.h
==============================================================================
--- projects/calloutng/sys/sys/_callout.h	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/sys/_callout.h	Wed May 30 15:51:02 2012	(r236315)
@@ -39,6 +39,7 @@
 #define	_SYS__CALLOUT_H
 
 #include <sys/queue.h>
+#include <sys/time.h>
 
 struct lock_object;
 
@@ -50,7 +51,8 @@ struct callout {
 		SLIST_ENTRY(callout) sle;
 		TAILQ_ENTRY(callout) tqe;
 	} c_links;
-	int	c_time;				/* ticks to the event */
+	TAILQ_ENTRY(callout) c_staiter;
+	struct bintime c_time;			/* ticks to the event */
 	void	*c_arg;				/* function argument */
 	void	(*c_func)(void *);		/* function to call */
 	struct lock_object *c_lock;		/* lock to handle */

Modified: projects/calloutng/sys/sys/callout.h
==============================================================================
--- projects/calloutng/sys/sys/callout.h	Wed May 30 14:47:51 2012	(r236314)
+++ projects/calloutng/sys/sys/callout.h	Wed May 30 15:51:02 2012	(r236315)
@@ -79,8 +79,8 @@ int	callout_schedule_on(struct callout *
 #define	callout_stop(c)		_callout_stop_safe(c, 0)
 int	_callout_stop_safe(struct callout *, int);
 void	callout_tick(void);
-int	callout_tickstofirst(int limit);
-extern void (*callout_new_inserted)(int cpu, int ticks);
+struct bintime callout_tickstofirst(void);
+extern void (*callout_new_inserted)(int cpu, struct bintime bt);
 
 #endif
 


