Date:      Thu, 16 Feb 2017 12:31:48 -0800
From:      Cy Schubert <Cy.Schubert@komquats.com>
To:        Ryan Stone <rstone@FreeBSD.org>
Cc:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   Re: svn commit: r313814 - head/sys/kern
Message-ID:  <201702162031.v1GKVmjA097376@slippy.cwsent.com>
In-Reply-To: Message from Ryan Stone <rstone@FreeBSD.org> of "Thu, 16 Feb 2017 19:41:13 +0000." <201702161941.v1GJfDoP087457@repo.freebsd.org>

In message <201702161941.v1GJfDoP087457@repo.freebsd.org>, Ryan Stone 
writes:
> Author: rstone
> Date: Thu Feb 16 19:41:13 2017
> New Revision: 313814
> URL: https://svnweb.freebsd.org/changeset/base/313814
> 
> Log:
>   Check for preemption after lowering a thread's priority
>   
>   When a high-priority thread is waiting for a mutex held by a
>   low-priority thread, it temporarily lends its priority to the
>   low-priority thread to prevent priority inversion.  When the mutex
>   is released, the lent priority is revoked and the low-priority
>   thread goes back to its original priority.
>   
>   When the priority of that thread is lowered (through a call to
>   sched_priority()), the scheduler did not check whether there was
>   now a higher-priority thread in the run queue.  This could cause
>   threads with real-time priority to be starved in the run queue
>   while the low-priority thread finished its quantum.
>   
>   Fix this by explicitly checking whether preemption is necessary
>   when a thread's priority is lowered.
>   
>   Sponsored by: Dell EMC Isilon
>   Obtained from: Sandvine Inc
>   Differential Revision:	https://reviews.freebsd.org/D9518
>   Reviewed by: Jeff Roberson (ule)
>   MFC after: 1 month
> 
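
For a concrete picture of the lending and revoking described in the log, the
closest userland analogue is a POSIX priority-inheritance mutex.  The program
below is only that analogy, under ordinary pthreads assumptions (no real-time
priorities are actually configured here); the kernel itself propagates
priority through turnstiles, not pthreads.

/*
 * Userland analogy only (not the kernel's turnstile code): a POSIX
 * priority-inheritance mutex lends the waiter's priority to the holder
 * and revokes it at unlock, which is the moment the commit is about.
 * Build with: cc -o pi_demo pi_demo.c -lpthread
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t m;

static void *
holder(void *arg)
{
        pthread_mutex_lock(&m);         /* low-priority thread takes the lock */
        sleep(1);                       /* work done while possibly boosted   */
        pthread_mutex_unlock(&m);       /* any lent priority is revoked here  */
        return (NULL);
}

int
main(void)
{
        pthread_mutexattr_t ma;
        pthread_t tid;

        pthread_mutexattr_init(&ma);
        /* Ask for priority inheritance, the userland form of "lending". */
        pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&m, &ma);

        pthread_create(&tid, NULL, holder, NULL);
        usleep(100000);                 /* let the holder grab the lock   */
        pthread_mutex_lock(&m);         /* a higher-priority waiter would */
        pthread_mutex_unlock(&m);       /*   boost the holder until here  */
        pthread_join(tid, NULL);
        return (0);
}
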
> Modified:
>   head/sys/kern/sched_4bsd.c
>   head/sys/kern/sched_ule.c
> 
> Modified: head/sys/kern/sched_4bsd.c
> ===========================================================================
> --- head/sys/kern/sched_4bsd.c	Thu Feb 16 19:00:09 2017	(r313813)
> +++ head/sys/kern/sched_4bsd.c	Thu Feb 16 19:41:13 2017	(r313814)
> @@ -816,7 +816,12 @@ sched_class(struct thread *td, int class
>  static void
>  sched_priority(struct thread *td, u_char prio)
>  {
> -
> +	struct thread *newtd;
> +	struct runq *rq;
> +	u_char orig_pri;
> +#ifdef SMP
> +	struct thread *cputd;
> +#endif
>  
>  	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
>  	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
> @@ -832,10 +837,43 @@ sched_priority(struct thread *td, u_char
>  	THREAD_LOCK_ASSERT(td, MA_OWNED);
>  	if (td->td_priority == prio)
>  		return;
> +	orig_pri = td->td_priority;
>  	td->td_priority = prio;
>  	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
>  		sched_rem(td);
>  		sched_add(td, SRQ_BORING);
> +	} else if (orig_pri < prio && TD_IS_RUNNING(td)) {
> +		/*
> +		 * If we have decreased the priority of a running thread, we
> +		 * have to check if it should be preempted.
> +		 */
> +		rq = &runq;
> +		newtd = runq_choose(&runq);
> +#ifdef SMP
> +		cputd = runq_choose(&runq_pcpu[td->td_oncpu]);
> +		if (newtd == NULL ||
> +		    (cputd != NULL && cputd->td_priority < td->td_priority))
> +			newtd = cputd;
> +#endif
> +
> +		if (newtd != NULL && newtd->td_priority < prio
> +#ifndef FULL_PREEMPTION
> +		    && (newtd->td_priority <= PRI_MAX_ITHD ||
> +		        prio >= PRI_MIN_IDLE))
> +#endif
> +		) {
> +			if (td == curthread)
> +				/*
> +				 * Don't reschedule the thread here as it may
> +				 * be losing priority because it has released a
> +				 * mutex, and in that case we need it to finish
> +				 * releasing the lock before it gets preempted.
> +				 */
> +				td->td_owepreempt = 1;
> +			else
> +				kick_other_cpu(newtd->td_priority,
> +				    td->td_oncpu);
> +		}
>  	}
>  }
>  
> 
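
The #ifndef FULL_PREEMPTION test in the 4BSD hunk above is easier to read
when flattened into a plain predicate.  The helper below is only an
illustration: the name and parameters are invented, the kernel option becomes
a runtime flag, and the two #defines are placeholders for the real
PRI_MAX_ITHD and PRI_MIN_IDLE values in <sys/priority.h>.

#include <stdbool.h>

#define PRI_MAX_ITHD    47      /* placeholder; real value is in sys/priority.h */
#define PRI_MIN_IDLE    224     /* placeholder; real value is in sys/priority.h */

/*
 * Invented helper restating the condition that sched_priority() above
 * applies after it has lowered a running thread's priority.
 */
bool
lowered_thread_should_yield(int best_runq_pri, int lowered_pri,
    bool full_preemption)
{
        if (best_runq_pri >= lowered_pri)       /* nothing better is waiting */
                return (false);
        if (full_preemption)                    /* FULL_PREEMPTION: always preempt */
                return (true);
        /* Otherwise only interrupt threads preempt, or anything beats idle. */
        return (best_runq_pri <= PRI_MAX_ITHD || lowered_pri >= PRI_MIN_IDLE);
}
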
> Modified: head/sys/kern/sched_ule.c
> ===========================================================================
> --- head/sys/kern/sched_ule.c	Thu Feb 16 19:00:09 2017	(r313813)
> +++ head/sys/kern/sched_ule.c	Thu Feb 16 19:41:13 2017	(r313814)
> @@ -319,7 +319,7 @@ static void tdq_add(struct tdq *, struct
>  #ifdef SMP
>  static int tdq_move(struct tdq *, struct tdq *);
>  static int tdq_idled(struct tdq *);
> -static void tdq_notify(struct tdq *, struct thread *);
> +static void tdq_notify(struct tdq *, int);
>  static struct thread *tdq_steal(struct tdq *, int);
>  static struct thread *runq_steal(struct runq *, int);
>  static int sched_pickcpu(struct thread *, int);
> @@ -1040,16 +1040,14 @@ tdq_idled(struct tdq *tdq)
>   * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
>   */
>  static void
> -tdq_notify(struct tdq *tdq, struct thread *td)
> +tdq_notify(struct tdq *tdq, int pri)
>  {
>  	struct thread *ctd;
> -	int pri;
>  	int cpu;
>  
>  	if (tdq->tdq_ipipending)
>  		return;
> -	cpu = td_get_sched(td)->ts_cpu;
> -	pri = td->td_priority;
> +	cpu = TD_ID(tdq);

Just my luck. The day I svn up and rebuild.

--- sched_ule.o ---
/opt/src/svn-current/sys/kern/sched_ule.c:1050:8: error: implicit declaration of function 'TD_ID' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
        cpu = TD_ID(tdq);
              ^
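
For reference, this is the standard C99 implicit-function-declaration
diagnostic, made fatal by the kernel's -Werror; any call to an identifier
with no declaration in scope reproduces it.  A minimal repro, with a made-up
file and symbol name:

/* repro.c */
int
main(void)
{
        return (not_declared_anywhere(0));      /* no prototype in scope */
}
/* build: cc -std=c99 -Werror=implicit-function-declaration repro.c */
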
-- 
Cheers,
Cy Schubert <Cy.Schubert@cschubert.com>
FreeBSD UNIX:  <cy@FreeBSD.org>   Web:  http://www.FreeBSD.org

	The need of the many outweighs the greed of the few.


>  	ctd = pcpu_find(cpu)->pc_curthread;
>  	if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
>  		return;
> @@ -1675,6 +1673,22 @@ sched_pctcpu_update(struct td_sched *ts,
>  	ts->ts_ltick = t;
>  }
>  
> +static void
> +sched_check_preempt(struct tdq *tdq, struct thread *td)
> +{
> +
> +	KASSERT(TD_IS_RUNNING(td), ("thread is not running"));
> +	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
> +	KASSERT(tdq == TDQ_CPU(td->td_sched->ts_cpu),
> +	    ("tdq does not contain td"));
> +
> +	if (tdq == TDQ_SELF()) {
> +		if (sched_shouldpreempt(tdq->tdq_lowpri, td->td_priority, 0))
> +			td->td_owepreempt = 1;
> +	} else
> +		tdq_notify(tdq, tdq->tdq_lowpri);
> +}
> +
>  /*
>   * Adjust the priority of a thread.  Move it to the appropriate run-queue
>   * if necessary.  This is the back-end for several priority related
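
The helper added in the hunk above splits the check by locality: on the
current CPU it only sets td_owepreempt after asking sched_shouldpreempt(), so
the actual switch happens at a safe point, while for another CPU it calls
tdq_notify(), which appears to be why tdq_notify() now takes a bare priority
rather than a thread pointer.  A generic sketch of that dispatch shape, with
invented names and stub declarations rather than the kernel's own:

struct cpu_runq;                        /* invented stand-in for struct tdq */

struct cpu_runq *my_cpu_runq(void);     /* stand-in for TDQ_SELF()          */
int     preemption_wanted(int waiter_pri, int running_pri);
void    set_owepreempt_on_self(void);   /* deferred, like td_owepreempt = 1 */
void    ipi_remote_cpu(struct cpu_runq *rq, int waiter_pri);  /* like tdq_notify() */

void
check_preempt_after_priority_drop(struct cpu_runq *rq, int waiter_pri,
    int running_pri)
{
        if (rq == my_cpu_runq()) {
                /* Local CPU: flag it and let the normal exit path preempt. */
                if (preemption_wanted(waiter_pri, running_pri))
                        set_owepreempt_on_self();
        } else {
                /* Remote CPU: poke it; it re-checks when the IPI arrives. */
                ipi_remote_cpu(rq, waiter_pri);
        }
}
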
> @@ -1726,6 +1740,9 @@ sched_thread_priority(struct thread *td,
>  			tdq->tdq_lowpri = prio;
>  		else if (tdq->tdq_lowpri == oldpri)
>  			tdq_setlowpri(tdq, td);
> +
> +		if (oldpri < prio)
> +			sched_check_preempt(tdq, td);
>  		return;
>  	}
>  	td->td_priority = prio;
> @@ -1854,7 +1871,7 @@ sched_switch_migrate(struct tdq *tdq, st
>  	 */
>  	tdq_lock_pair(tdn, tdq);
>  	tdq_add(tdn, td, flags);
> -	tdq_notify(tdn, td);
> +	tdq_notify(tdn, td->td_priority);
>  	TDQ_UNLOCK(tdn);
>  	spinlock_exit();
>  #endif
> @@ -2429,7 +2446,7 @@ sched_add(struct thread *td, int flags)
>  	tdq = sched_setcpu(td, cpu, flags);
>  	tdq_add(tdq, td, flags);
>  	if (cpu != PCPU_GET(cpuid)) {
> -		tdq_notify(tdq, td);
> +		tdq_notify(tdq, td->td_priority);
>  		return;
>  	}
>  #else
> 
> 
