Date:      Tue, 6 Jul 2004 07:26:30 GMT
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 56596 for review
Message-ID:  <200407060726.i667QUct010701@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=56596

Change 56596 by julian@julian_jules1 on 2004/07/06 07:26:07

	 Catch up with kern_switch.c, which has been moved elsewhere in my code.

Affected files ...

.. //depot/projects/nsched/sys/kern/sched_4bsd.c#24 edit
.. //depot/projects/nsched/sys/kern/sched_ule.c#12 edit

Differences ...

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#24 (text+ko) ====

@@ -36,6 +36,8 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.43 2004/07/02 20:21:43 jhb Exp $");
 
+#include "opt_full_preemption.h"
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -1832,6 +1834,13 @@
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
 	if (td->td_critnest == 1) {
+#ifdef PREEMPTION
+		if (td->td_flags & TDF_OWEPREEMPT) {
+			mtx_lock_spin(&sched_lock);
+			mi_switch(SW_INVOL, NULL);
+			mtx_unlock_spin(&sched_lock);
+		}
+#endif
 		td->td_critnest = 0;
 		cpu_critical_exit();
 	} else {
@@ -1839,6 +1848,88 @@
 	}
 }
 
+/*
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted.  It
+ * determines if the new thread should be immediately preempted to.  If so,
+ * it switches to it and eventually returns true.  If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+	struct thread *ctd;
+	int cpri, pri;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+#ifdef PREEMPTION
+	/*
+	 * The new thread should not preempt the current thread if any of the
+	 * following conditions are true:
+	 *
+	 *  - The current thread has a higher (numerically lower) priority.
+	 *  - It is too early in the boot for context switches (cold is set).
+	 *  - The current thread has an inhibitor set or is in the process of
+	 *    exiting.  In this case, the current thread is about to switch
+	 *    out anyways, so there's no point in preempting.  If we did,
+	 *    the current thread would not be properly resumed as well, so
+	 *    just avoid that whole landmine.
+	 *  - If the new thread's priority is not a realtime priority and
+	 *    the current thread's priority is not an idle priority and
+	 *    FULL_PREEMPTION is disabled.
+	 *
+	 * If all of these conditions are false, but the current thread is in
+	 * a nested critical section, then we have to defer the preemption
+	 * until we exit the critical section.  Otherwise, switch immediately
+	 * to the new thread.
+	 */
+	ctd = curthread;
+	pri = td->td_priority;
+	cpri = ctd->td_priority;
+	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
+	    td->td_td_sched->std_state != STDS_THREAD)
+		return (0);
+#ifndef FULL_PREEMPTION
+	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
+	    !(cpri >= PRI_MIN_IDLE))
+		return (0);
+#endif
+	if (ctd->td_critnest > 1) {
+		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+		    ctd->td_critnest);
+		ctd->td_flags |= TDF_OWEPREEMPT;
+		return (0);
+	}
+
+	/*
+	 * Our thread state says that we are already on a run queue, so
+	 * update our state as if we had been dequeued by choosethread().
+	 */
+	MPASS(TD_ON_RUNQ(td));
+	TD_SET_RUNNING(td);
+	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+	    td->td_proc->p_pid, td->td_proc->p_comm);
+	mi_switch(SW_INVOL, td);
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+#ifndef PREEMPTION
+/* XXX: There should be a non-static version of this. */
+static void
+printf_caddr_t(void *data)
+{
+	printf("%s", (char *)data);
+}
+static char preempt_warning[] =
+    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
+SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
+    preempt_warning)
+#endif
 
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests				*
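
The comment above maybe_preempt() spells out the caller contract: it is invoked with sched_lock held just before a newly runnable thread is enqueued, and only when it returns 0 does the caller go on to place the thread on a run queue.  A minimal caller sketch under those assumptions follows; setrunqueue_sketch(), the global runq, and the runq_add() usage are illustrative guesses based on the stock 4BSD scheduler of this era, not code from this change.

	/*
	 * Hypothetical caller sketch (not part of this change): enqueue a
	 * newly runnable thread unless maybe_preempt() already switched
	 * to it.
	 */
	static void
	setrunqueue_sketch(struct thread *td)
	{
		mtx_assert(&sched_lock, MA_OWNED); /* maybe_preempt() asserts this */
		if (maybe_preempt(td))
			return;			/* preempted: td is running now */
		runq_add(&runq, td->td_kse);	/* otherwise queue it as usual */
	}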

==== //depot/projects/nsched/sys/kern/sched_ule.c#12 (text+ko) ====

@@ -2772,6 +2772,13 @@
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
 	if (td->td_critnest == 1) {
+#ifdef PREEMPTION
+		if (td->td_flags & TDF_OWEPREEMPT) {
+			mtx_lock_spin(&sched_lock);
+			mi_switch(SW_INVOL, NULL);
+			mtx_unlock_spin(&sched_lock);
+		}
+#endif
 		td->td_critnest = 0;
 		cpu_critical_exit();
 	} else {
@@ -2780,6 +2787,88 @@
 }
 
 
+/*
+ * This function is called when a thread is about to be put on run queue
+ * because it has been made runnable or its priority has been adjusted.  It
+ * determines if the new thread should be immediately preempted to.  If so,
+ * it switches to it and eventually returns true.  If not, it returns false
+ * so that the caller may place the thread on an appropriate run queue.
+ */
+int
+maybe_preempt(struct thread *td)
+{
+#ifdef PREEMPTION
+	struct thread *ctd;
+	int cpri, pri;
+#endif
+
+	mtx_assert(&sched_lock, MA_OWNED);
+#ifdef PREEMPTION
+	/*
+	 * The new thread should not preempt the current thread if any of the
+	 * following conditions are true:
+	 *
+	 *  - The current thread has a higher (numerically lower) priority.
+	 *  - It is too early in the boot for context switches (cold is set).
+	 *  - The current thread has an inhibitor set or is in the process of
+	 *    exiting.  In this case, the current thread is about to switch
+	 *    out anyways, so there's no point in preempting.  If we did,
+	 *    the current thread would not be properly resumed as well, so
+	 *    just avoid that whole landmine.
+	 *  - If the new thread's priority is not a realtime priority and
+	 *    the current thread's priority is not an idle priority and
+	 *    FULL_PREEMPTION is disabled.
+	 *
+	 * If all of these conditions are false, but the current thread is in
+	 * a nested critical section, then we have to defer the preemption
+	 * until we exit the critical section.  Otherwise, switch immediately
+	 * to the new thread.
+	 */
+	ctd = curthread;
+	pri = td->td_priority;
+	cpri = ctd->td_priority;
+	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
+	    td->td_kse->ke_state != KES_THREAD)
+		return (0);
+#ifndef FULL_PREEMPTION
+	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
+	    !(cpri >= PRI_MIN_IDLE))
+		return (0);
+#endif
+	if (ctd->td_critnest > 1) {
+		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
+		    ctd->td_critnest);
+		ctd->td_flags |= TDF_OWEPREEMPT;
+		return (0);
+	}
+
+	/*
+	 * Our thread state says that we are already on a run queue, so
+	 * update our state as if we had been dequeued by choosethread().
+	 */
+	MPASS(TD_ON_RUNQ(td));
+	TD_SET_RUNNING(td);
+	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
+	    td->td_proc->p_pid, td->td_proc->p_comm);
+	mi_switch(SW_INVOL, td);
+	return (1);
+#else
+	return (0);
+#endif
+}
+
+#ifndef PREEMPTION
+/* XXX: There should be a non-static version of this. */
+static void
+printf_caddr_t(void *data)
+{
+	printf("%s", (char *)data);
+}
+static char preempt_warning[] =
+    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
+SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
+    preempt_warning)
+#endif
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests				*
  * basically from the standard BSD scheduler				*
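
Both schedulers gain the same deferred-preemption path in critical_exit(): when maybe_preempt() finds the current thread inside a nested critical section, it sets TDF_OWEPREEMPT and returns 0 instead of switching, and the owed involuntary switch is then performed by critical_exit() once the outermost critical section is left.  A rough illustration of the pattern from a consumer's point of view follows; update_percpu_state() is an invented placeholder, not code from this change.

	/*
	 * Illustrative sketch only: a thread doing work that must not be
	 * preempted.  If a higher-priority thread becomes runnable while
	 * we are inside the critical section, maybe_preempt() marks the
	 * current thread with TDF_OWEPREEMPT rather than switching, and the
	 * deferred mi_switch(SW_INVOL, NULL) happens in critical_exit()
	 * when the outermost section (td_critnest == 1) is exited.
	 */
	critical_enter();
	update_percpu_state();	/* hypothetical non-preemptible work */
	critical_exit();	/* any owed preemption occurs here */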


