From owner-svn-src-all@FreeBSD.ORG Thu Feb 12 13:31:09 2015
From: Randall Stewart <rrs@FreeBSD.org>
Date: Thu, 12 Feb 2015 13:31:09 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r278623 - head/sys/kern
Message-Id: <201502121331.t1CDV9jR086269@svn.freebsd.org>

Author: rrs
Date: Thu Feb 12 13:31:08 2015
New Revision: 278623
URL: https://svnweb.freebsd.org/changeset/base/278623

Log:
  This fixes a bug I inadvertently introduced when I updated the callout
  code in my last commit. cc_exec_next is used to track the next callout
  only when a direct call is being made from the callout wheel; it is
  *never* used in the indirect method. When macro-izing, I made it so that
  it was kept separately for direct vs. non-direct execution. This is
  incorrect and can cause panics, as Peter Holm found for me (thanks so
  much, Peter, for all your help with this).

  What this change does is restore that behavior, but also remove cc_next
  from the per-direction array and instead make it part of the base
  callout_cpu structure. This way no one else will get confused, since we
  never use it for non-direct execution.

  Reviewed by:	Peter Holm, and more importantly tested by him ;-)
  MFC after:	3 days
  Sponsored by:	Netflix Inc.

Modified:
  head/sys/kern/kern_timeout.c

Modified: head/sys/kern/kern_timeout.c
==============================================================================
--- head/sys/kern/kern_timeout.c	Thu Feb 12 11:57:31 2015	(r278622)
+++ head/sys/kern/kern_timeout.c	Thu Feb 12 13:31:08 2015	(r278623)
@@ -135,7 +135,6 @@ u_int callwheelsize, callwheelmask;
  * the migrating callout is already running.
  */
 struct cc_exec {
-	struct callout		*cc_next;
 	struct callout		*cc_curr;
 #ifdef SMP
 	void			(*ce_migration_func)(void *);
@@ -155,6 +154,7 @@ struct cc_exec {
 struct callout_cpu {
 	struct mtx_padalign	cc_lock;
 	struct cc_exec		cc_exec_entity[2];
+	struct callout		*cc_next;
 	struct callout		*cc_callout;
 	struct callout_list	*cc_callwheel;
 	struct callout_tailq	cc_expireq;
@@ -167,7 +167,7 @@ struct callout_cpu {
 };
 
 #define	cc_exec_curr(cc, dir)		cc->cc_exec_entity[dir].cc_curr
-#define	cc_exec_next(cc, dir)		cc->cc_exec_entity[dir].cc_next
+#define	cc_exec_next(cc)		cc->cc_next
 #define	cc_exec_cancel(cc, dir)		cc->cc_exec_entity[dir].cc_cancel
 #define	cc_exec_waiting(cc, dir)	cc->cc_exec_entity[dir].cc_waiting
 #ifdef SMP
@@ -226,7 +226,6 @@ cc_cce_cleanup(struct callout_cpu *cc, i
 {
 
 	cc_exec_curr(cc, direct) = NULL;
-	cc_exec_next(cc, direct) = NULL;
 	cc_exec_cancel(cc, direct) = false;
 	cc_exec_waiting(cc, direct) = false;
 #ifdef SMP
@@ -482,7 +481,7 @@ callout_process(sbintime_t now)
 #ifdef CALLOUT_PROFILING
 				++depth_dir;
 #endif
-				cc_exec_next(cc, 1) =
+				cc_exec_next(cc) =
 				    LIST_NEXT(tmp, c_links.le);
 				cc->cc_bucket = firstb & callwheelmask;
 				LIST_REMOVE(tmp, c_links.le);
@@ -491,7 +490,8 @@ callout_process(sbintime_t now)
 				    &mpcalls_dir, &lockcalls_dir, NULL,
 #endif
 				    1);
-				tmp = cc_exec_next(cc, 1);
+				tmp = cc_exec_next(cc);
+				cc_exec_next(cc) = NULL;
 			} else {
 				tmpn = LIST_NEXT(tmp, c_links.le);
 				LIST_REMOVE(tmp, c_links.le);
@@ -575,7 +575,7 @@ callout_lock(struct callout *c)
 static void
 callout_cc_add(struct callout *c, struct callout_cpu *cc,
     sbintime_t sbt, sbintime_t precision, void (*func)(void *),
-    void *arg, int cpu, int flags, int direct)
+    void *arg, int cpu, int flags)
 {
 	int bucket;
 
@@ -584,8 +584,6 @@ callout_cc_add(struct callout *c, struct
 		sbt = cc->cc_lastscan;
 	c->c_arg = arg;
 	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
-	if (flags & C_DIRECT_EXEC)
-		c->c_flags |= CALLOUT_DIRECT;
 	c->c_flags &= ~CALLOUT_PROCESSED;
 	c->c_func = func;
 	c->c_time = sbt;
@@ -596,7 +594,7 @@ callout_cc_add(struct callout *c, struct
 	    (u_int)(c->c_precision & 0xffffffff));
 	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
 	if (cc->cc_bucket == bucket)
-		cc_exec_next(cc, direct) = c;
+		cc_exec_next(cc) = c;
 #ifndef NO_EVENTTIMERS
 	/*
 	 * Inform the eventtimers(4) subsystem there's a new callout
@@ -790,7 +788,7 @@ skip:
 		new_cc = callout_cpu_switch(c, cc, new_cpu);
 		flags = (direct) ? C_DIRECT_EXEC : 0;
 		callout_cc_add(c, new_cc, new_time, new_prec, new_func,
-		    new_arg, new_cpu, flags, direct);
+		    new_arg, new_cpu, flags);
 		CC_UNLOCK(new_cc);
 		CC_LOCK(cc);
 #else
@@ -994,6 +992,14 @@ callout_reset_sbt_on(struct callout *c,
 	 */
 	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
 		cpu = c->c_cpu;
+	/*
+	 * This flag used to be added by callout_cc_add, but the
+	 * first time you call this we could end up with the
+	 * wrong direct flag if we don't do it before we add.
+	 */
+	if (flags & C_DIRECT_EXEC) {
+		c->c_flags |= CALLOUT_DIRECT;
+	}
 	direct = (c->c_flags & CALLOUT_DIRECT) != 0;
 	KASSERT(!direct || c->c_lock == NULL,
 	    ("%s: direct callout %p has lock", __func__, c));
@@ -1039,8 +1045,8 @@ callout_reset_sbt_on(struct callout *c,
 	}
 	if (c->c_flags & CALLOUT_PENDING) {
 		if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
-			if (cc_exec_next(cc, direct) == c)
-				cc_exec_next(cc, direct) = LIST_NEXT(c, c_links.le);
+			if (cc_exec_next(cc) == c)
+				cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 			LIST_REMOVE(c, c_links.le);
 		} else
 			TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
@@ -1089,7 +1095,7 @@ callout_reset_sbt_on(struct callout *c,
 	}
 #endif
 
-	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags, direct);
+	callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
 	CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
 	    cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
 	    (u_int)(to_sbt & 0xffffffff));
@@ -1322,8 +1328,8 @@ again:
 		    c, c->c_func, c->c_arg);
 		if (not_on_a_list == 0) {
 			if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
-				if (cc_exec_next(cc, direct) == c)
-					cc_exec_next(cc, direct) = LIST_NEXT(c, c_links.le);
+				if (cc_exec_next(cc) == c)
+					cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
 				LIST_REMOVE(c, c_links.le);
 			} else
 				TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
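
For readers following the data-structure change described in the log, here is a
minimal, simplified sketch of where the "next callout" pointer lives before and
after this commit. It is not the real kern_timeout.c layout: locking, the
callwheel, and the SMP migration fields are omitted, and the _old/_new names
exist only for this illustration.

	/*
	 * Simplified illustration only -- not the actual kernel definitions.
	 */
	struct callout;				/* opaque for this sketch */

	/*
	 * Before r278623: each execution "direction" (softclock vs. direct)
	 * carried its own cc_next, even though only the direct path used it.
	 */
	struct cc_exec_old {
		struct callout	*cc_next;	/* per-direction "next" (removed) */
		struct callout	*cc_curr;	/* callout currently executing */
	};

	struct callout_cpu_old {
		struct cc_exec_old	cc_exec_entity[2];	/* indexed by direct */
	};

	/*
	 * After r278623: a single cc_next hangs off the per-CPU structure, so
	 * every user of the cc_exec_next() macro sees the same pointer no
	 * matter which direction index a caller computes.
	 */
	struct cc_exec_new {
		struct callout	*cc_curr;
	};

	struct callout_cpu_new {
		struct cc_exec_new	cc_exec_entity[2];
		struct callout		*cc_next;	/* direct path only */
	};

With the single per-CPU cc_next, callout_process(), callout_cc_add() and
callout_reset_sbt_on() all read and update the same pointer through the
cc_exec_next(cc) macro, so a mismatched direct/indirect index can no longer
leave a stale "next" pointer behind; the C_DIRECT_EXEC to CALLOUT_DIRECT
translation now happens in callout_reset_sbt_on() before the direct flag is
evaluated.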