Skip site navigation (1)Skip section navigation (2)
Date:      Sun, 27 Nov 2016 20:21:38 +0000 (UTC)
From:      Hans Petter Selasky <hselasky@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r309218 - in projects/hps_head/sys: dev/nand dev/oce dev/twa kern net netgraph netinet netinet6 netpfil/pf sys tests/callout_test
Message-ID:  <201611272021.uARKLclU072590@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: hselasky
Date: Sun Nov 27 20:21:38 2016
New Revision: 309218
URL: https://svnweb.freebsd.org/changeset/base/309218

Log:
  Define the callout return values clearly and readably so that simple
  mistakes cannot be made when writing new code. Return a 32-bit union
  instead of "int" to ensure all use of the return values for callout
  functions is properly evaluated; otherwise a compilation error will result.

Modified:
  projects/hps_head/sys/dev/nand/nandsim_chip.c
  projects/hps_head/sys/dev/oce/oce_if.c
  projects/hps_head/sys/dev/twa/tw_osl_freebsd.c
  projects/hps_head/sys/kern/kern_exit.c
  projects/hps_head/sys/kern/kern_timeout.c
  projects/hps_head/sys/kern/subr_taskqueue.c
  projects/hps_head/sys/net/if_llatbl.c
  projects/hps_head/sys/netgraph/ng_base.c
  projects/hps_head/sys/netinet/if_ether.c
  projects/hps_head/sys/netinet/in.c
  projects/hps_head/sys/netinet/tcp_timer.c
  projects/hps_head/sys/netinet6/in6.c
  projects/hps_head/sys/netinet6/nd6.c
  projects/hps_head/sys/netpfil/pf/if_pfsync.c
  projects/hps_head/sys/sys/callout.h
  projects/hps_head/sys/tests/callout_test/callout_test.c

Modified: projects/hps_head/sys/dev/nand/nandsim_chip.c
==============================================================================
--- projects/hps_head/sys/dev/nand/nandsim_chip.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/dev/nand/nandsim_chip.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -401,8 +401,7 @@ nandsim_delay(struct nandsim_chip *chip,
 
 	chip->sm_state = NANDSIM_STATE_TIMEOUT;
 	tm = (timeout/10000) * (hz / 100);
-	if (callout_reset(&chip->ns_callout, tm, nandsim_callout_eh, ev) &
-	    CALLOUT_RET_CANCELLED) {
+	if (callout_reset(&chip->ns_callout, tm, nandsim_callout_eh, ev).bit.cancelled) {
 		/* XXX we are leaking the old event */
 		return (-1);
 	}

Modified: projects/hps_head/sys/dev/oce/oce_if.c
==============================================================================
--- projects/hps_head/sys/dev/oce/oce_if.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/dev/oce/oce_if.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -365,8 +365,8 @@ oce_attach(device_t dev)
 	oce_add_sysctls(sc);
 
 	callout_init(&sc->timer, CALLOUT_MPSAFE);
-	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
-	if (rc & CALLOUT_RET_CANCELLED)
+	rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc).bit.cancelled;
+	if (rc)
 		goto stats_free;
 
 	sc->next =NULL;

Modified: projects/hps_head/sys/dev/twa/tw_osl_freebsd.c
==============================================================================
--- projects/hps_head/sys/dev/twa/tw_osl_freebsd.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/dev/twa/tw_osl_freebsd.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -476,14 +476,14 @@ twa_watchdog(TW_VOID *arg)
 		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
 #endif /* TW_OSL_DEBUG */
 		my_watchdog_was_pending =
-			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
+			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle).bit.cancelled;
 		tw_cl_reset_ctlr(ctlr_handle);
 #ifdef    TW_OSL_DEBUG
 		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
 #endif /* TW_OSL_DEBUG */
 	} else if (driver_is_active) {
 		my_watchdog_was_pending =
-			callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle);
+			callout_reset(&(sc->watchdog_callout[i]),  5*hz, twa_watchdog, &sc->ctlr_handle).bit.cancelled;
 	}
 #ifdef    TW_OSL_DEBUG
 	if (i_need_a_reset || my_watchdog_was_pending)

Modified: projects/hps_head/sys/kern/kern_exit.c
==============================================================================
--- projects/hps_head/sys/kern/kern_exit.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/kern/kern_exit.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -347,13 +347,13 @@ exit1(struct thread *td, int rval, int s
 	 */
 	if (timevalisset(&p->p_realtimer.it_value)) {
 		timevalclear(&p->p_realtimer.it_interval);
-		drain = callout_stop(&p->p_itcallout);
+		drain = callout_stop(&p->p_itcallout).bit.draining;
 	} else {
 		drain = 0;
 	}
 	PROC_UNLOCK(p);
 
-	if (drain & CALLOUT_RET_DRAINING)
+	if (drain)
 		callout_drain(&p->p_itcallout);
 
 	umtx_thread_exit(td);

Modified: projects/hps_head/sys/kern/kern_timeout.c
==============================================================================
--- projects/hps_head/sys/kern/kern_timeout.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/kern/kern_timeout.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -1013,9 +1013,9 @@ callout_handle_init(struct callout_handl
 
 #ifdef KTR
 static const char *
-callout_retvalstring(int retval)
+callout_retvalstring(callout_ret_t retval)
 {
-	switch (retval) {
+	switch (retval.value) {
 	case CALLOUT_RET_DRAINING:
 		return ("callout cannot be stopped and needs drain");
 	case CALLOUT_RET_CANCELLED:
@@ -1028,12 +1028,12 @@ callout_retvalstring(int retval)
 }
 #endif
 
-static int
+static callout_ret_t
 callout_restart_async(struct callout *c, struct callout_args *coa,
     callout_func_t *drain_fn)
 {
 	struct callout_cpu *cc;
-	int retval;
+	callout_ret_t retval;
 	int direct;
 
 	cc = callout_lock(c);
@@ -1047,7 +1047,7 @@ callout_restart_async(struct callout *c,
 	 */
 	if (cc_exec_curr(cc, direct) == c) {
 
-		retval = CALLOUT_RET_DRAINING;
+		retval.value = CALLOUT_RET_DRAINING;
 
 		/* set drain function, if any */
 		if (drain_fn != NULL)
@@ -1060,7 +1060,7 @@ callout_restart_async(struct callout *c,
 		if (cc_exec_cancel(cc, direct) == false ||
 		    cc_exec_restart(cc, direct) == true) {
 			cc_exec_cancel(cc, direct) = true;
-			retval |= CALLOUT_RET_CANCELLED;
+			retval.value |= CALLOUT_RET_CANCELLED;
 		}
 
 		/*
@@ -1107,9 +1107,9 @@ callout_restart_async(struct callout *c,
 			} else {
 				TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
 			}
-			retval = CALLOUT_RET_CANCELLED;
+			retval.value = CALLOUT_RET_CANCELLED;
 		} else {
-			retval = CALLOUT_RET_STOPPED;
+			retval.value = CALLOUT_RET_STOPPED;
 		}
 
 		CTR4(KTR_CALLOUT, "%s: %p func %p arg %p",
@@ -1126,7 +1126,7 @@ callout_restart_async(struct callout *c,
 
 			/* return callback to pre-allocated list, if any */
 			if ((c->c_flags & CALLOUT_LOCAL_ALLOC) &&
-			    retval != CALLOUT_RET_STOPPED) {
+			    retval.value != CALLOUT_RET_STOPPED) {
 				callout_cc_del(c, cc);
 			}
 		}
@@ -1201,7 +1201,7 @@ callout_when(sbintime_t sbt, sbintime_t 
  * callout_pending() - returns truth if callout is still waiting for timeout
  * callout_deactivate() - marks the callout as having been serviced
  */
-int
+callout_ret_t
 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
     callout_func_t *ftn, void *arg, int cpu, int flags)
 {
@@ -1223,19 +1223,19 @@ callout_reset_sbt_on(struct callout *c, 
 /*
  * Common idioms that can be optimized in the future.
  */
-int
+callout_ret_t
 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
 {
 	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu));
 }
 
-int
+callout_ret_t
 callout_schedule(struct callout *c, int to_ticks)
 {
 	return (callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu));
 }
 
-int
+callout_ret_t
 callout_stop(struct callout *c)
 {
 	/* get callback stopped, if any */
@@ -1248,17 +1248,17 @@ callout_drain_function(void *arg)
 	wakeup(&callout_drain_function);
 }
 
-int
+callout_ret_t
 callout_async_drain(struct callout *c, callout_func_t *fn)
 {
 	/* get callback stopped, if any */
 	return (callout_restart_async(c, NULL, fn));
 }
 
-int
+callout_ret_t
 callout_drain(struct callout *c)
 {
-	int retval;
+	callout_ret_t retval;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "Draining callout");
@@ -1267,7 +1267,7 @@ callout_drain(struct callout *c)
 
 	retval = callout_async_drain(c, &callout_drain_function);
 
-	if (retval & CALLOUT_RET_DRAINING) {
+	if (retval.bit.draining) {
 		void *ident = &callout_drain_function;
 		struct callout_cpu *cc;
 		int direct;

Modified: projects/hps_head/sys/kern/subr_taskqueue.c
==============================================================================
--- projects/hps_head/sys/kern/subr_taskqueue.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/kern/subr_taskqueue.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -520,7 +520,7 @@ taskqueue_cancel_timeout(struct taskqueu
 	int error;
 
 	TQ_LOCK(queue);
-	pending = (callout_stop(&timeout_task->c) & CALLOUT_RET_CANCELLED) ? 1 : 0;
+	pending = callout_stop(&timeout_task->c).bit.cancelled;
 	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
 	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
 		timeout_task->f &= ~DT_CALLOUT_ARMED;

Modified: projects/hps_head/sys/net/if_llatbl.c
==============================================================================
--- projects/hps_head/sys/net/if_llatbl.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/net/if_llatbl.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -520,7 +520,7 @@ lltable_free(struct lltable *llt)
 	IF_AFDATA_WUNLOCK(llt->llt_ifp);
 
 	LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) {
-		if (callout_stop(&lle->lle_timer) & CALLOUT_RET_CANCELLED)
+		if (callout_stop(&lle->lle_timer).bit.cancelled)
 			LLE_REMREF(lle);
 		llentry_free(lle);
 	}

Modified: projects/hps_head/sys/netgraph/ng_base.c
==============================================================================
--- projects/hps_head/sys/netgraph/ng_base.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netgraph/ng_base.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -3795,8 +3795,8 @@ ng_callout(struct callout *c, node_p nod
 	NGI_ARG1(item) = arg1;
 	NGI_ARG2(item) = arg2;
 	oitem = c->c_arg;
-	if ((callout_reset(c, ticks, &ng_callout_trampoline, item) &
-	     CALLOUT_RET_CANCELLED) && oitem != NULL)
+	if (callout_reset(c, ticks, &ng_callout_trampoline, item).bit.cancelled
+	    && oitem != NULL)
 		NG_FREE_ITEM(oitem);
 	return (0);
 }
@@ -3811,10 +3811,10 @@ ng_uncallout(struct callout *c, node_p n
 	KASSERT(c != NULL, ("ng_uncallout: NULL callout"));
 	KASSERT(node != NULL, ("ng_uncallout: NULL node"));
 
-	rval = callout_stop(c);
+	rval = callout_stop(c).bit.cancelled;
 	item = c->c_arg;
 	/* Do an extra check */
-	if ((rval & CALLOUT_RET_CANCELLED) &&
+	if ((rval != 0) &&
 	    (c->c_func == &ng_callout_trampoline) &&
 	    (item != NULL) && (NGI_NODE(item) == node)) {
 		/*

Modified: projects/hps_head/sys/netinet/if_ether.c
==============================================================================
--- projects/hps_head/sys/netinet/if_ether.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netinet/if_ether.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -553,13 +553,10 @@ arpresolve_full(struct ifnet *ifp, int i
 		error = is_gw != 0 ? EHOSTUNREACH : EHOSTDOWN;
 
 	if (renew) {
-		int canceled;
-
 		LLE_ADDREF(la);
 		la->la_expire = time_uptime;
-		canceled = callout_reset(&la->lle_timer, hz * V_arpt_down,
-		    arptimer, la);
-		if (canceled & CALLOUT_RET_CANCELLED)
+		if (callout_reset(&la->lle_timer, hz * V_arpt_down,
+		    arptimer, la).bit.cancelled)
 			LLE_REMREF(la);
 		la->la_asked++;
 		LLE_WUNLOCK(la);
@@ -1248,7 +1245,7 @@ arp_check_update_lle(struct arphdr *ah, 
 static void
 arp_mark_lle_reachable(struct llentry *la)
 {
-	int canceled, wtime;
+	int wtime;
 
 	LLE_WLOCK_ASSERT(la);
 
@@ -1261,9 +1258,8 @@ arp_mark_lle_reachable(struct llentry *l
 		wtime = V_arpt_keep - V_arp_maxtries * V_arpt_rexmit;
 		if (wtime < 0)
 			wtime = V_arpt_keep;
-		canceled = callout_reset(&la->lle_timer,
-		    hz * wtime, arptimer, la);
-		if (canceled & CALLOUT_RET_CANCELLED)
+		if (callout_reset(&la->lle_timer,
+		    hz * wtime, arptimer, la).bit.cancelled)
 			LLE_REMREF(la);
 	}
 	la->la_asked = 0;
@@ -1368,15 +1364,14 @@ garp_rexmit(void *arg)
 	if (ia->ia_garp_count >= garp_rexmit_count) {
 		ifa_free(&ia->ia_ifa);
 	} else {
-		int rescheduled;
+		int cancelled;
 		IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
-		rescheduled = callout_reset(&ia->ia_garp_timer,
+		cancelled = callout_reset(&ia->ia_garp_timer,
 		    (1 << ia->ia_garp_count) * hz,
-		    garp_rexmit, ia);
+		    garp_rexmit, ia).bit.cancelled;
 		IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);
-		if (rescheduled & CALLOUT_RET_CANCELLED) {
+		if (cancelled)
 			ifa_free(&ia->ia_ifa);
-		}
 	}
 }
 
@@ -1406,7 +1401,7 @@ garp_timer_start(struct ifaddr *ifa)
 	IF_ADDR_WLOCK(ia->ia_ifa.ifa_ifp);
 	ia->ia_garp_count = 0;
 	if (!(callout_reset(&ia->ia_garp_timer, (1 << ia->ia_garp_count) * hz,
-	    garp_rexmit, ia) & CALLOUT_RET_CANCELLED)) {
+	    garp_rexmit, ia).bit.cancelled)) {
 		ifa_ref(ifa);
 	}
 	IF_ADDR_WUNLOCK(ia->ia_ifa.ifa_ifp);

Modified: projects/hps_head/sys/netinet/in.c
==============================================================================
--- projects/hps_head/sys/netinet/in.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netinet/in.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -638,9 +638,8 @@ in_difaddr_ioctl(caddr_t data, struct if
 	}
 
 	IF_ADDR_WLOCK(ifp);
-	if (callout_stop(&ia->ia_garp_timer) & CALLOUT_RET_CANCELLED) {
+	if (callout_stop(&ia->ia_garp_timer).bit.cancelled)
 		ifa_free(&ia->ia_ifa);
-	}
 	IF_ADDR_WUNLOCK(ifp);
 
 	EVENTHANDLER_INVOKE(ifaddr_event, ifp);
@@ -1154,7 +1153,7 @@ in_lltable_free_entry(struct lltable *ll
 	}
 
 	/* cancel timer */
-	if (callout_stop(&lle->lle_timer) & CALLOUT_RET_CANCELLED)
+	if (callout_stop(&lle->lle_timer).bit.cancelled)
 		LLE_REMREF(lle);
 
 	/* Drop hold queue */

Modified: projects/hps_head/sys/netinet/tcp_timer.c
==============================================================================
--- projects/hps_head/sys/netinet/tcp_timer.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netinet/tcp_timer.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -1001,7 +1001,7 @@ tcp_timer_stop(struct tcpcb *tp, uint32_
 			panic("tp %p bad timer_type %#x", tp, timer_type);
 		}
 
-	if (callout_async_drain(t_callout, tcp_timer_discard) & CALLOUT_RET_DRAINING) {
+	if (callout_async_drain(t_callout, tcp_timer_discard).bit.draining) {
 		/*
 		 * Can't stop the callout, defer tcpcb actual deletion
 		 * to the last one. We do this using the async drain

Modified: projects/hps_head/sys/netinet6/in6.c
==============================================================================
--- projects/hps_head/sys/netinet6/in6.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netinet6/in6.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -2098,7 +2098,7 @@ in6_lltable_free_entry(struct lltable *l
 		lltable_unlink_entry(llt, lle);
 	}
 
-	if (callout_stop(&lle->lle_timer) & CALLOUT_RET_CANCELLED)
+	if (callout_stop(&lle->lle_timer).bit.cancelled)
 		LLE_REMREF(lle);
 
 	llentry_free(lle);

Modified: projects/hps_head/sys/netinet6/nd6.c
==============================================================================
--- projects/hps_head/sys/netinet6/nd6.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netinet6/nd6.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -523,28 +523,28 @@ skip1:
 static void
 nd6_llinfo_settimer_locked(struct llentry *ln, long tick)
 {
-	int canceled;
+	int cancelled;
 
 	LLE_WLOCK_ASSERT(ln);
 
 	if (tick < 0) {
 		ln->la_expire = 0;
 		ln->ln_ntick = 0;
-		canceled = callout_stop(&ln->lle_timer);
+		cancelled = callout_stop(&ln->lle_timer).bit.cancelled;
 	} else {
 		ln->la_expire = time_uptime + tick / hz;
 		LLE_ADDREF(ln);
 		if (tick > INT_MAX) {
 			ln->ln_ntick = tick - INT_MAX;
-			canceled = callout_reset(&ln->lle_timer, INT_MAX,
-			    nd6_llinfo_timer, ln);
+			cancelled = callout_reset(&ln->lle_timer, INT_MAX,
+			    nd6_llinfo_timer, ln).bit.cancelled;
 		} else {
 			ln->ln_ntick = 0;
-			canceled = callout_reset(&ln->lle_timer, tick,
-			    nd6_llinfo_timer, ln);
+			cancelled = callout_reset(&ln->lle_timer, tick,
+			    nd6_llinfo_timer, ln).bit.cancelled;
 		}
 	}
-	if (canceled & CALLOUT_RET_CANCELLED)
+	if (cancelled)
 		LLE_REMREF(ln);
 }
 

Modified: projects/hps_head/sys/netpfil/pf/if_pfsync.c
==============================================================================
--- projects/hps_head/sys/netpfil/pf/if_pfsync.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/netpfil/pf/if_pfsync.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -353,7 +353,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
 
 		TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
 		sc->sc_deferred--;
-		if (callout_stop(&pd->pd_tmo) & CALLOUT_RET_CANCELLED) {
+		if (callout_stop(&pd->pd_tmo).bit.cancelled) {
 			pf_release_state(pd->pd_st);
 			m_freem(pd->pd_m);
 			free(pd, M_PFSYNC);
@@ -1776,7 +1776,7 @@ pfsync_undefer_state(struct pf_state *st
 
 	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
 		 if (pd->pd_st == st) {
-			if (callout_stop(&pd->pd_tmo) & CALLOUT_RET_CANCELLED)
+			if (callout_stop(&pd->pd_tmo).bit.cancelled)
 				pfsync_undefer(pd, drop);
 			return;
 		}

Modified: projects/hps_head/sys/sys/callout.h
==============================================================================
--- projects/hps_head/sys/sys/callout.h	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/sys/callout.h	Sun Nov 27 20:21:38 2016	(r309218)
@@ -52,11 +52,27 @@
 #define	CALLOUT_SET_LC(x)	(((x) & 7) << 16) /* set lock class */
 #define	CALLOUT_GET_LC(x)	(((x) >> 16) & 7) /* get lock class */
 
-/* return values for all callout_xxx() functions */
-#define	CALLOUT_RET_CANCELLED_AND_DRAINING (CALLOUT_RET_CANCELLED | CALLOUT_RET_DRAINING)
-#define	CALLOUT_RET_DRAINING	2 /* callout is being serviced */
-#define	CALLOUT_RET_CANCELLED	1 /* callout was successfully stopped */
-#define	CALLOUT_RET_STOPPED	0 /* callout was already stopped */
+/* return value for all callout_xxx() functions */
+typedef union callout_ret {
+	struct {
+		unsigned cancelled : 1;
+		unsigned draining : 1;
+		unsigned reserved : 30;
+	} bit;
+	unsigned value;
+} callout_ret_t;
+
+#define	CALLOUT_RET_CANCELLED_AND_DRAINING \
+    ((const callout_ret_t){.bit.cancelled = 1,.bit.draining = 1}).value
+/* callout is being serviced */
+#define	CALLOUT_RET_DRAINING \
+    ((const callout_ret_t){.bit.draining = 1}).value
+/* callout was successfully stopped */
+#define	CALLOUT_RET_CANCELLED \
+    ((const callout_ret_t){.bit.cancelled = 1}).value
+/* callout was already stopped */
+#define	CALLOUT_RET_STOPPED \
+    ((const callout_ret_t){.value = 0}).value
 
 #define	C_DIRECT_EXEC		0x0001 /* direct execution of callout */
 #define	C_PRELBITS		7
@@ -74,8 +90,8 @@ struct callout_handle {
 #ifdef _KERNEL
 #define	callout_active(c)	((c)->c_flags & CALLOUT_ACTIVE)
 #define	callout_deactivate(c)	((c)->c_flags &= ~CALLOUT_ACTIVE)
-int	callout_drain(struct callout *);
-int	callout_async_drain(struct callout *, callout_func_t *);
+callout_ret_t	callout_drain(struct callout *);
+callout_ret_t	callout_async_drain(struct callout *, callout_func_t *);
 void	callout_init(struct callout *, int);
 void	callout_init_lock_function(struct callout *, callout_lock_func_t *, int);
 void	callout_init_lock_object(struct callout *, struct lock_object *, int);
@@ -89,7 +105,7 @@ void	callout_init_lock_object(struct cal
 	callout_init_lock_object((c), ((rw) != NULL) ? &(rw)->lock_object : \
 	   NULL, (flags))
 #define	callout_pending(c)	((c)->c_flags & CALLOUT_PENDING)
-int	callout_reset_sbt_on(struct callout *, sbintime_t, sbintime_t,
+callout_ret_t	callout_reset_sbt_on(struct callout *, sbintime_t, sbintime_t,
 	    callout_func_t *, void *, int, int);
 #define	callout_reset_sbt(c, sbt, pr, fn, arg, flags)			\
     callout_reset_sbt_on((c), (sbt), (pr), (fn), (arg), -1, (flags))
@@ -110,11 +126,11 @@ int	callout_reset_sbt_on(struct callout 
     callout_schedule_sbt_on((c), (sbt), (pr), -1, (flags))
 #define	callout_schedule_sbt_curcpu(c, sbt, pr, flags)			\
     callout_schedule_sbt_on((c), (sbt), (pr), PCPU_GET(cpuid), (flags))
-int	callout_schedule(struct callout *, int);
-int	callout_schedule_on(struct callout *, int, int);
+callout_ret_t	callout_schedule(struct callout *, int);
+callout_ret_t	callout_schedule_on(struct callout *, int, int);
 #define	callout_schedule_curcpu(c, on_tick)				\
     callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
-int	callout_stop(struct callout *);
+callout_ret_t	callout_stop(struct callout *);
 void	callout_when(sbintime_t, sbintime_t, int, sbintime_t *, sbintime_t *);
 void	callout_process(sbintime_t now);
 

Modified: projects/hps_head/sys/tests/callout_test/callout_test.c
==============================================================================
--- projects/hps_head/sys/tests/callout_test/callout_test.c	Sun Nov 27 20:11:55 2016	(r309217)
+++ projects/hps_head/sys/tests/callout_test/callout_test.c	Sun Nov 27 20:21:38 2016	(r309218)
@@ -129,7 +129,7 @@ test_callout(void *arg)
 void
 execute_the_co_test(struct callout_run *rn)
 {
-	int i, ret, cpu;
+	int i, cpu;
 	uint32_t tk_s, tk_e, tk_d;
 
 	mtx_lock(&rn->lock);
@@ -157,8 +157,7 @@ execute_the_co_test(struct callout_run *
 	}
 	/* OK everyone is waiting and we have the lock */
 	for (i = 0; i < rn->co_number_callouts; i++) {
-		ret = callout_async_drain(&rn->co_array[i], drainit);
-		if (!(ret & CALLOUT_RET_DRAINING)) {
+		if (!callout_async_drain(&rn->co_array[i], drainit).bit.draining) {
 			rn->cnt_one++;
 		} else {
 			rn->cnt_zero++;



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201611272021.uARKLclU072590>