Skip site navigation (1)Skip section navigation (2)
Date:      Tue, 19 May 2009 05:17:42 +0000 (UTC)
From:      Kip Macy <kmacy@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r192371 - in user/kmacy/releng_7_2_fcs/sys: kern sys ufs/ffs vm
Message-ID:  <200905190517.n4J5HgQD085995@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: kmacy
Date: Tue May 19 05:17:41 2009
New Revision: 192371
URL: http://svn.freebsd.org/changeset/base/192371

Log:
  merge 177368, 177374, 183297
  - Relax requirements for p_numthreads, p_threads, p_swtick, and p_nice from
    requiring the per-process spinlock to only requiring the process lock.
  - Reflect these changes in the proc.h documentation and consumers throughout
    the kernel.  This is a substantial reduction in locking cost for these
    fields and was made possible by recent changes to threading support.

Modified:
  user/kmacy/releng_7_2_fcs/sys/kern/kern_cpuset.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_exit.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_proc.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_resource.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_sig.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_thr.c
  user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c
  user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c
  user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c
  user/kmacy/releng_7_2_fcs/sys/kern/sys_generic.c
  user/kmacy/releng_7_2_fcs/sys/kern/sys_process.c
  user/kmacy/releng_7_2_fcs/sys/kern/tty.c
  user/kmacy/releng_7_2_fcs/sys/sys/proc.h
  user/kmacy/releng_7_2_fcs/sys/ufs/ffs/ffs_snapshot.c
  user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c
  user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c
  user/kmacy/releng_7_2_fcs/sys/vm/vm_pageout.c

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_cpuset.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_cpuset.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_cpuset.c	Tue May 19 05:17:41 2009	(r192371)
@@ -418,11 +418,9 @@ cpuset_which(cpuwhich_t which, id_t id, 
 		sx_slock(&allproc_lock);
 		FOREACH_PROC_IN_SYSTEM(p) {
 			PROC_LOCK(p);
-			PROC_SLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, td)
 				if (td->td_tid == id)
 					break;
-			PROC_SUNLOCK(p);
 			if (td != NULL)
 				break;
 			PROC_UNLOCK(p);
@@ -542,11 +540,9 @@ cpuset_setproc(pid_t pid, struct cpuset 
 		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
 		if (error)
 			goto out;
-		PROC_SLOCK(p);
 		if (nfree >= p->p_numthreads)
 			break;
 		threads = p->p_numthreads;
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		for (; nfree < threads; nfree++) {
 			nset = uma_zalloc(cpuset_zone, M_WAITOK);
@@ -554,7 +550,6 @@ cpuset_setproc(pid_t pid, struct cpuset 
 		}
 	}
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	/*
 	 * Now that the appropriate locks are held and we have enough cpusets,
 	 * make sure the operation will succeed before applying changes.  The
@@ -588,8 +583,8 @@ cpuset_setproc(pid_t pid, struct cpuset 
 	}
 	/*
 	 * Replace each thread's cpuset while using deferred release.  We
-	 * must do this because the PROC_SLOCK has to be held while traversing
-	 * the thread list and this limits the type of operations allowed.
+	 * must do this because the thread lock must be held while operating
+	 * on the thread and this limits the type of operations allowed.
 	 */
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);
@@ -623,7 +618,6 @@ cpuset_setproc(pid_t pid, struct cpuset 
 		thread_unlock(td);
 	}
 unlock_out:
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
 out:
 	while ((nset = LIST_FIRST(&droplist)) != NULL)
@@ -952,13 +946,11 @@ cpuset_getaffinity(struct thread *td, st
 			thread_unlock(ttd);
 			break;
 		case CPU_WHICH_PID:
-			PROC_SLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, ttd) {
 				thread_lock(ttd);
 				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
 				thread_unlock(ttd);
 			}
-			PROC_SUNLOCK(p);
 			break;
 		case CPU_WHICH_CPUSET:
 		case CPU_WHICH_JAIL:

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_exit.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_exit.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_exit.c	Tue May 19 05:17:41 2009	(r192371)
@@ -550,9 +550,7 @@ retry:
 	 * proc lock.
 	 */
 	wakeup(p->p_pptr);
-	PROC_SLOCK(p->p_pptr);
 	sched_exit(p->p_pptr, td);
-	PROC_SUNLOCK(p->p_pptr);
 	PROC_SLOCK(p);
 	p->p_state = PRS_ZOMBIE;
 	PROC_UNLOCK(p->p_pptr);

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_proc.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_proc.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_proc.c	Tue May 19 05:17:41 2009	(r192371)
@@ -682,11 +682,11 @@ fill_kinfo_proc_only(struct proc *p, str
 	struct ucred *cred;
 	struct sigacts *ps;
 
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	bzero(kp, sizeof(*kp));
 
 	kp->ki_structsize = sizeof(*kp);
 	kp->ki_paddr = p;
-	PROC_LOCK_ASSERT(p, MA_OWNED);
 	kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
 	kp->ki_args = p->p_args;
 	kp->ki_textvp = p->p_textvp;
@@ -818,7 +818,7 @@ fill_kinfo_thread(struct thread *td, str
 	struct proc *p;
 
 	p = td->td_proc;
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 
 	thread_lock(td);
 	if (td->td_wmesg != NULL)
@@ -893,10 +893,8 @@ fill_kinfo_proc(struct proc *p, struct k
 {
 
 	fill_kinfo_proc_only(p, kp);
-	PROC_SLOCK(p);
 	if (FIRST_THREAD_IN_PROC(p) != NULL)
 		fill_kinfo_thread(FIRST_THREAD_IN_PROC(p), kp, 0);
-	PROC_SUNLOCK(p);
 }
 
 struct pstats *
@@ -963,15 +961,12 @@ sysctl_out_proc(struct proc *p, struct s
 
 	fill_kinfo_proc_only(p, &kinfo_proc);
 	if (flags & KERN_PROC_NOTHREADS) {
-		PROC_SLOCK(p);
 		if (FIRST_THREAD_IN_PROC(p) != NULL)
 			fill_kinfo_thread(FIRST_THREAD_IN_PROC(p),
 			    &kinfo_proc, 0);
-		PROC_SUNLOCK(p);
 		error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
 				   sizeof(kinfo_proc));
 	} else {
-		PROC_SLOCK(p);
 		if (FIRST_THREAD_IN_PROC(p) != NULL)
 			FOREACH_THREAD_IN_PROC(p, td) {
 				fill_kinfo_thread(td, &kinfo_proc, 1);
@@ -983,7 +978,6 @@ sysctl_out_proc(struct proc *p, struct s
 		else
 			error = SYSCTL_OUT(req, (caddr_t)&kinfo_proc,
 					   sizeof(kinfo_proc));
-		PROC_SUNLOCK(p);
 	}
 	PROC_UNLOCK(p);
 	if (error)
@@ -1725,7 +1719,7 @@ sysctl_kern_proc_kstack(SYSCTL_HANDLER_A
 
 	lwpidarray = NULL;
 	numthreads = 0;
-	PROC_SLOCK(p);
+	PROC_LOCK(p);
 repeat:
 	if (numthreads < p->p_numthreads) {
 		if (lwpidarray != NULL) {
@@ -1733,13 +1727,12 @@ repeat:
 			lwpidarray = NULL;
 		}
 		numthreads = p->p_numthreads;
-		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 		lwpidarray = malloc(sizeof(*lwpidarray) * numthreads, M_TEMP,
 		    M_WAITOK | M_ZERO);
-		PROC_SLOCK(p);
+		PROC_LOCK(p);
 		goto repeat;
 	}
-	PROC_SUNLOCK(p);
 	i = 0;
 
 	/*
@@ -1751,7 +1744,6 @@ repeat:
 	 * have changed, in which case the right to extract debug info might
 	 * no longer be assured.
 	 */
-	PROC_LOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		KASSERT(i < numthreads,
 		    ("sysctl_kern_proc_kstack: numthreads"));

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_resource.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_resource.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_resource.c	Tue May 19 05:17:41 2009	(r192371)
@@ -264,9 +264,7 @@ donice(struct thread *td, struct proc *p
 		n = PRIO_MIN;
  	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
 		return (EACCES);
-	PROC_SLOCK(p);
 	sched_nice(p, n);
-	PROC_SUNLOCK(p);
 	return (0);
 }
 
@@ -307,7 +305,6 @@ rtprio_thread(struct thread *td, struct 
 	case RTP_LOOKUP:
 		if ((error = p_cansee(td, p)))
 			break;
-		PROC_SLOCK(p);
 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
 			td1 = td;
 		else
@@ -316,7 +313,6 @@ rtprio_thread(struct thread *td, struct 
 			pri_to_rtp(td1, &rtp);
 		else
 			error = ESRCH;
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
 	case RTP_SET:
@@ -341,7 +337,6 @@ rtprio_thread(struct thread *td, struct 
 				break;
 		}
 
-		PROC_SLOCK(p);
 		if (uap->lwpid == 0 || uap->lwpid == td->td_tid)
 			td1 = td;
 		else
@@ -350,7 +345,6 @@ rtprio_thread(struct thread *td, struct 
 			error = rtp_to_pri(&rtp, td1);
 		else
 			error = ESRCH;
-		PROC_SUNLOCK(p);
 		break;
 	default:
 		error = EINVAL;
@@ -401,7 +395,6 @@ rtprio(td, uap)
 	case RTP_LOOKUP:
 		if ((error = p_cansee(td, p)))
 			break;
-		PROC_SLOCK(p);
 		/*
 		 * Return OUR priority if no pid specified,
 		 * or if one is, report the highest priority
@@ -429,7 +422,6 @@ rtprio(td, uap)
 				}
 			}
 		}
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
 	case RTP_SET:
@@ -460,7 +452,6 @@ rtprio(td, uap)
 		 * do all the threads on that process. If we
 		 * specify our own pid we do the latter.
 		 */
-		PROC_SLOCK(p);
 		if (uap->pid == 0) {
 			error = rtp_to_pri(&rtp, td);
 		} else {
@@ -469,7 +460,6 @@ rtprio(td, uap)
 					break;
 			}
 		}
-		PROC_SUNLOCK(p);
 		break;
 	default:
 		error = EINVAL;
@@ -702,9 +692,7 @@ kern_setrlimit(td, which, limp)
 		if (limp->rlim_cur != RLIM_INFINITY &&
 		    p->p_cpulimit == RLIM_INFINITY)
 			callout_reset(&p->p_limco, hz, lim_cb, p);
-		PROC_SLOCK(p);
 		p->p_cpulimit = limp->rlim_cur;
-		PROC_SUNLOCK(p);
 		break;
 	case RLIMIT_DATA:
 		if (limp->rlim_cur > maxdsiz)
@@ -960,11 +948,12 @@ kern_getrusage(td, who, rup)
 	struct rusage *rup;
 {
 	struct proc *p;
+	int error;
 
+	error = 0;
 	p = td->td_proc;
 	PROC_LOCK(p);
 	switch (who) {
-
 	case RUSAGE_SELF:
 		rufetchcalc(p, rup, &rup->ru_utime,
 		    &rup->ru_stime);
@@ -976,11 +965,10 @@ kern_getrusage(td, who, rup)
 		break;
 
 	default:
-		PROC_UNLOCK(p);
-		return (EINVAL);
+		error = EINVAL;
 	}
 	PROC_UNLOCK(p);
-	return (0);
+	return (error);
 }
 
 void

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_sig.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_sig.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_sig.c	Tue May 19 05:17:41 2009	(r192371)
@@ -512,10 +512,8 @@ sigqueue_delete_set_proc(struct proc *p,
 	sigqueue_init(&worklist, NULL);
 	sigqueue_move_set(&p->p_sigqueue, &worklist, set);
 
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td0)
 		sigqueue_move_set(&td0->td_sigqueue, &worklist, set);
-	PROC_SUNLOCK(p);
 
 	sigqueue_flush(&worklist);
 }
@@ -1958,7 +1956,6 @@ sigtd(struct proc *p, int sig, int prop)
 	if (curproc == p && !SIGISMEMBER(curthread->td_sigmask, sig))
 		return (curthread);
 	signal_td = NULL;
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		if (!SIGISMEMBER(td->td_sigmask, sig)) {
 			signal_td = td;
@@ -1967,7 +1964,6 @@ sigtd(struct proc *p, int sig, int prop)
 	}
 	if (signal_td == NULL)
 		signal_td = FIRST_THREAD_IN_PROC(p);
-	PROC_SUNLOCK(p);
 	return (signal_td);
 }
 
@@ -2020,27 +2016,6 @@ psignal_event(struct proc *p, struct sig
 int
 tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
 {
-#ifdef KSE
-	sigset_t saved;
-	int ret;
-
-	if (p->p_flag & P_SA)
-		saved = p->p_sigqueue.sq_signals;
-	ret = do_tdsignal(p, td, sig, ksi);
-	if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
-		if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
-			/* pending set changed */
-			p->p_flag |= P_SIGEVENT;
-			wakeup(&p->p_siglist);
-		}
-	}
-	return (ret);
-}
-
-static int
-do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
-{
-#endif
 	sig_t action;
 	sigqueue_t *sigqueue;
 	int prop;
@@ -2173,7 +2148,6 @@ do_tdsignal(struct proc *p, struct threa
 	 * waking up threads so that they can cross the user boundary.
 	 * We try do the per-process part here.
 	 */
-	PROC_SLOCK(p);
 	if (P_SHOULDSTOP(p)) {
 		/*
 		 * The process is in stopped mode. All the threads should be
@@ -2185,7 +2159,6 @@ do_tdsignal(struct proc *p, struct threa
 			 * so no further action is necessary.
 			 * No signal can restart us.
 			 */
-			PROC_SUNLOCK(p);
 			goto out;
 		}
 
@@ -2211,6 +2184,7 @@ do_tdsignal(struct proc *p, struct threa
 			 * Otherwise, process goes back to sleep state.
 			 */
 			p->p_flag &= ~P_STOPPED_SIG;
+			PROC_SLOCK(p);
 			if (p->p_numthreads == p->p_suspcount) {
 				PROC_SUNLOCK(p);
 				p->p_flag |= P_CONTINUED;
@@ -2227,22 +2201,7 @@ do_tdsignal(struct proc *p, struct threa
 				goto out;
 			}
 			if (action == SIG_CATCH) {
-#ifdef KSE
-				/*
-				 * The process wants to catch it so it needs
-				 * to run at least one thread, but which one?
-				 * It would seem that the answer would be to
-				 * run an upcall in the next KSE to run, and
-				 * deliver the signal that way. In a NON KSE
-				 * process, we need to make sure that the
-				 * single thread is runnable asap.
-				 * XXXKSE for now however, make them all run.
-				 */
-#endif
-				/*
-				 * The process wants to catch it so it needs
-				 * to run at least one thread, but which one?
-				 */
+				PROC_SUNLOCK(p);
 				goto runfast;
 			}
 			/*
@@ -2259,7 +2218,6 @@ do_tdsignal(struct proc *p, struct threa
 			 * (If we did the shell could get confused).
 			 * Just make sure the signal STOP bit set.
 			 */
-			PROC_SUNLOCK(p);
 			p->p_flag |= P_STOPPED_SIG;
 			sigqueue_delete(sigqueue, sig);
 			goto out;
@@ -2274,6 +2232,7 @@ do_tdsignal(struct proc *p, struct threa
 		 * It may run a bit until it hits a thread_suspend_check().
 		 */
 		wakeup_swapper = 0;
+		PROC_SLOCK(p);
 		thread_lock(td);
 		if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
 			wakeup_swapper = sleepq_abort(td, intrval);
@@ -2288,22 +2247,18 @@ do_tdsignal(struct proc *p, struct threa
 		 */
 	} else if (p->p_state == PRS_NORMAL) {
 		if (p->p_flag & P_TRACED || action == SIG_CATCH) {
-			thread_lock(td);
 			tdsigwakeup(td, sig, action, intrval);
-			thread_unlock(td);
-			PROC_SUNLOCK(p);
 			goto out;
 		}
 
 		MPASS(action == SIG_DFL);
 
 		if (prop & SA_STOP) {
-			if (p->p_flag & P_PPWAIT) {
-				PROC_SUNLOCK(p);
+			if (p->p_flag & P_PPWAIT)
 				goto out;
-			}
 			p->p_flag |= P_STOPPED_SIG;
 			p->p_xstat = sig;
+			PROC_SLOCK(p);
 			sig_suspend_threads(td, p, 1);
 			if (p->p_numthreads == p->p_suspcount) {
 				/*
@@ -2319,13 +2274,9 @@ do_tdsignal(struct proc *p, struct threa
 			} else
 				PROC_SUNLOCK(p);
 			goto out;
-		} 
-		else
-			goto runfast;
-		/* NOTREACHED */
+		}
 	} else {
 		/* Not in "NORMAL" state. discard the signal. */
-		PROC_SUNLOCK(p);
 		sigqueue_delete(sigqueue, sig);
 		goto out;
 	}
@@ -2334,11 +2285,9 @@ do_tdsignal(struct proc *p, struct threa
 	 * The process is not stopped so we need to apply the signal to all the
 	 * running threads.
 	 */
-
 runfast:
-	thread_lock(td);
 	tdsigwakeup(td, sig, action, intrval);
-	thread_unlock(td);
+	PROC_SLOCK(p);
 	thread_unsuspend(p);
 	PROC_SUNLOCK(p);
 out:
@@ -2361,17 +2310,16 @@ tdsigwakeup(struct thread *td, int sig, 
 
 	wakeup_swapper = 0;
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	prop = sigprop(sig);
 
+	PROC_SLOCK(p);
+	thread_lock(td);
 	/*
 	 * Bring the priority of a thread up if we want it to get
 	 * killed in this lifetime.
 	 */
 	if (action == SIG_DFL && (prop & SA_KILL) && td->td_priority > PUSER)
 		sched_prio(td, PUSER);
-
 	if (TD_ON_SLEEPQ(td)) {
 		/*
 		 * If thread is sleeping uninterruptibly
@@ -2380,7 +2328,7 @@ tdsigwakeup(struct thread *td, int sig, 
 		 * trap() or syscall().
 		 */
 		if ((td->td_flags & TDF_SINTR) == 0)
-			return;
+			goto out;
 		/*
 		 * If SIGCONT is default (or ignored) and process is
 		 * asleep, we are finished; the process should not
@@ -2395,8 +2343,6 @@ tdsigwakeup(struct thread *td, int sig, 
 			 * Remove from both for now.
 			 */
 			sigqueue_delete(&td->td_sigqueue, sig);
-			PROC_SLOCK(p);
-			thread_lock(td);
 			return;
 		}
 
@@ -2418,8 +2364,9 @@ tdsigwakeup(struct thread *td, int sig, 
 			forward_signal(td);
 #endif
 	}
-	if (wakeup_swapper)
-		kick_proc0();
+out:
+	PROC_SUNLOCK(p);
+	thread_unlock(td);
 }
 
 static void

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_thr.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_thr.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_thr.c	Tue May 19 05:17:41 2009	(r192371)
@@ -230,13 +230,11 @@ create_thread(struct thread *td, mcontex
 	PROC_LOCK(td->td_proc);
 	td->td_proc->p_flag |= P_HADTHREADS;
 	newtd->td_sigmask = td->td_sigmask;
-	PROC_SLOCK(p);
 	thread_link(newtd, p); 
 	thread_lock(td);
 	/* let the scheduler know about these things. */
 	sched_fork_thread(td, newtd);
 	thread_unlock(td);
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
 	thread_lock(newtd);
 	if (rtp != NULL) {

Modified: user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/kern_thread.c	Tue May 19 05:17:41 2009	(r192371)
@@ -415,7 +415,6 @@ thread_exit(void)
 #ifdef AUDIT
 	AUDIT_SYSCALL_EXIT(0, td);
 #endif
-
 #ifdef KSE
 	if (td->td_standin != NULL) {
 		/*
@@ -428,7 +427,6 @@ thread_exit(void)
 #endif
 
 	umtx_thread_exit(td);
-
 	/*
 	 * drop FPU & debug register state storage, or any other
 	 * architecture specific resources that
@@ -455,13 +453,7 @@ thread_exit(void)
 	 */
 	if (p->p_flag & P_HADTHREADS) {
 		if (p->p_numthreads > 1) {
-			thread_lock(td);
-#ifdef KSE
-			kse_unlink(td);
-#else
 			thread_unlink(td);
-#endif
-			thread_unlock(td);
 			td2 = FIRST_THREAD_IN_PROC(p);
 			sched_exit_thread(td2, td);
 
@@ -568,8 +560,8 @@ thread_link(struct thread *td, struct pr
 
 	/*
 	 * XXX This can't be enabled because it's called for proc0 before
-	 * it's spinlock has been created.
-	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
+	 * its lock has been created.
+	 * PROC_LOCK_ASSERT(p, MA_OWNED);
 	 */
 	td->td_state    = TDS_INACTIVE;
 	td->td_proc     = p;
@@ -619,7 +611,7 @@ thread_unlink(struct thread *td)
 {
 	struct proc *p = td->td_proc;
 
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	TAILQ_REMOVE(&p->p_threads, td, td_plist);
 	p->p_numthreads--;
 	/* could clear a few other things here */
@@ -1020,11 +1012,9 @@ thread_find(struct proc *p, lwpid_t tid)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK(p);
 	FOREACH_THREAD_IN_PROC(p, td) {
 		if (td->td_tid == tid)
 			break;
 	}
-	PROC_SUNLOCK(p);
 	return (td);
 }

Modified: user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sched_4bsd.c	Tue May 19 05:17:41 2009	(r192371)
@@ -368,7 +368,7 @@ schedcpu(void)
 	realstathz = stathz ? stathz : hz;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
-		PROC_SLOCK(p);
+		PROC_LOCK(p);
 		FOREACH_THREAD_IN_PROC(p, td) {
 			awake = 0;
 			thread_lock(td);
@@ -448,7 +448,7 @@ schedcpu(void)
 			resetpriority_thread(td);
 			thread_unlock(td);
 		}
-		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 	}
 	sx_sunlock(&allproc_lock);
 }
@@ -629,7 +629,7 @@ sched_exit(struct proc *p, struct thread
 
 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
 	    td, td->td_proc->p_comm, td->td_priority);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
 }
 
@@ -670,7 +670,6 @@ sched_nice(struct proc *p, int nice)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	p->p_nice = nice;
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);

Modified: user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sched_ule.c	Tue May 19 05:17:41 2009	(r192371)
@@ -1971,7 +1971,6 @@ sched_nice(struct proc *p, int nice)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 
 	p->p_nice = nice;
 	FOREACH_THREAD_IN_PROC(p, td) {
@@ -2132,7 +2131,7 @@ sched_exit(struct proc *p, struct thread
 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
 	    child, child->td_proc->p_comm, child->td_priority);
 
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	td = FIRST_THREAD_IN_PROC(p);
 	sched_exit_thread(td, child);
 }

Modified: user/kmacy/releng_7_2_fcs/sys/kern/sys_generic.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sys_generic.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sys_generic.c	Tue May 19 05:17:41 2009	(r192371)
@@ -585,14 +585,13 @@ ioctl(struct thread *td, struct ioctl_ar
 		return (ENOTTY);
 
 	if (size > 0) {
-		if (!(com & IOC_VOID))
-			data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
-		else {
+		if (com & IOC_VOID) {
 			/* Integer argument. */
 			arg = (intptr_t)uap->data;
 			data = (void *)&arg;
 			size = 0;
-		}
+		} else
+			data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
 	} else
 		data = (void *)&uap->data;
 	if (com & IOC_IN) {
@@ -957,21 +956,8 @@ poll(td, uap)
 	size_t ni;
 
 	nfds = uap->nfds;
-
-	/*
-	 * This is kinda bogus.  We have fd limits, but that is not
-	 * really related to the size of the pollfd array.  Make sure
-	 * we let the process use at least FD_SETSIZE entries and at
-	 * least enough for the current limits.  We want to be reasonably
-	 * safe, but not overly restrictive.
-	 */
-	PROC_LOCK(td->td_proc);
-	if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) &&
-	    (nfds > FD_SETSIZE)) {
-		PROC_UNLOCK(td->td_proc);
+	if (nfds > maxfilesperproc && nfds > FD_SETSIZE) 
 		return (EINVAL);
-	}
-	PROC_UNLOCK(td->td_proc);
 	ni = nfds * sizeof(struct pollfd);
 	if (ni > sizeof(smallbits))
 		bits = malloc(ni, M_TEMP, M_WAITOK);

Modified: user/kmacy/releng_7_2_fcs/sys/kern/sys_process.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/sys_process.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/sys_process.c	Tue May 19 05:17:41 2009	(r192371)
@@ -528,12 +528,10 @@ kern_ptrace(struct thread *td, int req, 
 			sx_slock(&allproc_lock);
 			FOREACH_PROC_IN_SYSTEM(p) {
 				PROC_LOCK(p);
-				PROC_SLOCK(p);
 				FOREACH_THREAD_IN_PROC(p, td2) {
 					if (td2->td_tid == pid)
 						break;
 				}
-				PROC_SUNLOCK(p);
 				if (td2 != NULL)
 					break; /* proc lock held */
 				PROC_UNLOCK(p);
@@ -789,7 +787,6 @@ kern_ptrace(struct thread *td, int req, 
 			thread_unlock(td2);
 			td2->td_xsig = data;
 
-			PROC_SLOCK(p);
 			if (req == PT_DETACH) {
 				struct thread *td3;
 				FOREACH_THREAD_IN_PROC(p, td3) {
@@ -803,11 +800,7 @@ kern_ptrace(struct thread *td, int req, 
 			 * you should use PT_SUSPEND to suspend it before
 			 * continuing process.
 			 */
-#ifdef KSE
-			PROC_SUNLOCK(p);
-			thread_continued(p);
 			PROC_SLOCK(p);
-#endif
 			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
 			thread_unsuspend(p);
 			PROC_SUNLOCK(p);
@@ -972,13 +965,11 @@ kern_ptrace(struct thread *td, int req, 
 		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
 		tmp = 0;
 		PROC_LOCK(p);
-		PROC_SLOCK(p);
 		FOREACH_THREAD_IN_PROC(p, td2) {
 			if (tmp >= num)
 				break;
 			buf[tmp++] = td2->td_tid;
 		}
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
 		free(buf, M_TEMP);

Modified: user/kmacy/releng_7_2_fcs/sys/kern/tty.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/kern/tty.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/kern/tty.c	Tue May 19 05:17:41 2009	(r192371)
@@ -2582,7 +2582,7 @@ ttyinfo(struct tty *tp)
 		if (proc_compare(pick, p))
 			pick = p;
 
-	PROC_SLOCK(pick);
+	PROC_LOCK(pick);
 	picktd = NULL;
 	td = FIRST_THREAD_IN_PROC(pick);
 	FOREACH_THREAD_IN_PROC(pick, td)
@@ -2618,7 +2618,7 @@ ttyinfo(struct tty *tp)
 		rss = 0;
 	else
 		rss = pgtok(vmspace_resident_count(pick->p_vmspace));
-	PROC_SUNLOCK(pick);
+	PROC_UNLOCK(pick);
 	PROC_LOCK(pick);
 	PGRP_UNLOCK(tp->t_pgrp);
 	rufetchcalc(pick, &ru, &utime, &stime);
@@ -2747,12 +2747,12 @@ proc_compare(struct proc *p1, struct pro
 	 * Fetch various stats about these processes.  After we drop the
 	 * lock the information could be stale but the race is unimportant.
 	 */
-	PROC_SLOCK(p1);
+	PROC_LOCK(p1);
 	runa = proc_sum(p1, &esta);
-	PROC_SUNLOCK(p1);
-	PROC_SLOCK(p2);
+	PROC_UNLOCK(p1);
+	PROC_LOCK(p2);
 	runb = proc_sum(p2, &estb);
-	PROC_SUNLOCK(p2);
+	PROC_UNLOCK(p2);
 	
 	/*
 	 * see if at least one of them is runnable

Modified: user/kmacy/releng_7_2_fcs/sys/sys/proc.h
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/sys/proc.h	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/sys/proc.h	Tue May 19 05:17:41 2009	(r192371)
@@ -493,7 +493,7 @@ struct rusage_ext {
  */
 struct proc {
 	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
-	TAILQ_HEAD(, thread) p_threads;	/* (j) all threads. */
+	TAILQ_HEAD(, thread) p_threads;	/* (c) all threads. */
 	TAILQ_HEAD(, kse_upcall) p_upcalls; /* (j) All upcalls in the proc. */
 	struct mtx	p_slock;	/* process spin lock */
 	struct ucred	*p_ucred;	/* (c) Process owner's identity. */
@@ -530,7 +530,7 @@ struct proc {
 #define	p_startzero	p_oppid
 	pid_t		p_oppid;	/* (c + e) Save ppid in ptrace. XXX */
 	struct vmspace	*p_vmspace;	/* (b) Address space. */
-	u_int		p_swtick;	/* (j) Tick when swapped in or out. */
+	u_int		p_swtick;	/* (c) Tick when swapped in or out. */
 	struct itimerval p_realtimer;	/* (c) Alarm timer. */
 	struct rusage	p_ru;		/* (a) Exit information. */
 	struct rusage_ext p_rux;	/* (cj) Internal resource usage. */
@@ -576,14 +576,14 @@ struct proc {
 	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
 	struct pargs	*p_args;	/* (c) Process arguments. */
 	rlim_t		p_cpulimit;	/* (c) Current CPU limit in seconds. */
-	signed char	p_nice;		/* (c + j) Process "nice" value. */
+	signed char	p_nice;		/* (c) Process "nice" value. */
 	int		p_fibnum;	/* in this routing domain XXX MRT */
 /* End area that is copied on creation. */
 #define	p_endcopy	p_xstat
 
 	u_short		p_xstat;	/* (c) Exit status; also stop sig. */
 	struct knlist	p_klist;	/* (c) Knotes attached to this proc. */
-	int		p_numthreads;	/* (j) Number of threads. */
+	int		p_numthreads;	/* (c) Number of threads. */
 	struct mdproc	p_md;		/* Any machine-dependent fields. */
 	struct callout	p_itcallout;	/* (h + c) Interval timer callout. */
 	u_short		p_acflag;	/* (c) Accounting flags. */

Modified: user/kmacy/releng_7_2_fcs/sys/ufs/ffs/ffs_snapshot.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/ufs/ffs/ffs_snapshot.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/ufs/ffs/ffs_snapshot.c	Tue May 19 05:17:41 2009	(r192371)
@@ -397,10 +397,8 @@ restart:
 
 		p = td->td_proc;
 		PROC_LOCK(p);
-		PROC_SLOCK(p);
 		saved_nice = p->p_nice;
 		sched_nice(p, 0);
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 	}
 	/*
@@ -820,9 +818,7 @@ out:
 
 		p = td->td_proc;
 		PROC_LOCK(p);
-		PROC_SLOCK(p);
 		sched_nice(td->td_proc, saved_nice);
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(td->td_proc);
 	}
 	UFS_LOCK(ump);

Modified: user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/vm/vm_glue.c	Tue May 19 05:17:41 2009	(r192371)
@@ -334,6 +334,7 @@ vm_thread_new(struct thread *td, int pag
 	 * Allocate an object for the kstack.
 	 */
 	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
+	
 	/*
 	 * Get a kernel virtual address for this thread's kstack.
 	 */
@@ -641,10 +642,8 @@ faultin(p)
 		FOREACH_THREAD_IN_PROC(p, td)
 			vm_thread_swapin(td);
 		PROC_LOCK(p);
-		PROC_SLOCK(p);
 		swapclear(p);
 		p->p_swtick = ticks;
-		PROC_SUNLOCK(p);
 
 		wakeup(&p->p_flag);
 
@@ -695,7 +694,6 @@ loop:
 			continue;
 		}
 		swtime = (ticks - p->p_swtick) / hz;
-		PROC_SLOCK(p);
 		FOREACH_THREAD_IN_PROC(p, td) {
 			/*
 			 * An otherwise runnable thread of a process
@@ -721,7 +719,6 @@ loop:
 			}
 			thread_unlock(td);
 		}
-		PROC_SUNLOCK(p);
 		PROC_UNLOCK(p);
 	}
 	sx_sunlock(&allproc_lock);
@@ -838,7 +835,7 @@ retry:
 		if (p->p_lock != 0 ||
 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
 		    ) != 0) {
-			goto nextproc2;
+			goto nextproc;
 		}
 		/*
 		 * only aiod changes vmspace, however it will be
@@ -846,7 +843,7 @@ retry:
 		 * for P_SYSTEM
 		 */
 		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
-			goto nextproc2;
+			goto nextproc;
 
 		switch (p->p_state) {
 		default:
@@ -855,7 +852,6 @@ retry:
 			break;
 
 		case PRS_NORMAL:
-			PROC_SLOCK(p);
 			/*
 			 * do not swapout a realtime process
 			 * Check all the thread groups..
@@ -917,17 +913,14 @@ retry:
 				 (minslptime > swap_idle_threshold2))) {
 				if (swapout(p) == 0)
 					didswap++;
-				PROC_SUNLOCK(p);
 				PROC_UNLOCK(p);
 				vm_map_unlock(&vm->vm_map);
 				vmspace_free(vm);
 				sx_sunlock(&allproc_lock);
 				goto retry;
 			}
-nextproc:			
-			PROC_SUNLOCK(p);
 		}
-nextproc2:
+nextproc:
 		PROC_UNLOCK(p);
 		vm_map_unlock(&vm->vm_map);
 nextproc1:
@@ -950,7 +943,6 @@ swapclear(p)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 
 	FOREACH_THREAD_IN_PROC(p, td) {
 		thread_lock(td);
@@ -981,7 +973,6 @@ swapout(p)
 	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
 #if defined(SWAP_DEBUG)
 	printf("swapping out %d\n", p->p_pid);
 #endif
@@ -1016,7 +1007,6 @@ swapout(p)
 	}
 	td = FIRST_THREAD_IN_PROC(p);
 	++td->td_ru.ru_nswap;
-	PROC_SUNLOCK(p);
 	PROC_UNLOCK(p);
 
 	/*
@@ -1029,7 +1019,6 @@ swapout(p)
 
 	PROC_LOCK(p);
 	p->p_flag &= ~P_SWAPPINGOUT;
-	PROC_SLOCK(p);
 	p->p_swtick = ticks;
 	return (0);
 }

Modified: user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/vm/vm_meter.c	Tue May 19 05:17:41 2009	(r192371)
@@ -130,13 +130,16 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 	FOREACH_PROC_IN_SYSTEM(p) {
 		if (p->p_flag & P_SYSTEM)
 			continue;
+		PROC_LOCK(p);
 		PROC_SLOCK(p);
 		switch (p->p_state) {
 		case PRS_NEW:
 			PROC_SUNLOCK(p);
+			PROC_UNLOCK(p);
 			continue;
 			break;
 		default:
+			PROC_SUNLOCK(p);
 			FOREACH_THREAD_IN_PROC(p, td) {
 				thread_lock(td);
 				switch (td->td_state) {
@@ -164,7 +167,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 				thread_unlock(td);
 			}
 		}
-		PROC_SUNLOCK(p);
+		PROC_UNLOCK(p);
 		/*
 		 * Note active objects.
 		 */

Modified: user/kmacy/releng_7_2_fcs/sys/vm/vm_pageout.c
==============================================================================
--- user/kmacy/releng_7_2_fcs/sys/vm/vm_pageout.c	Tue May 19 04:47:30 2009	(r192370)
+++ user/kmacy/releng_7_2_fcs/sys/vm/vm_pageout.c	Tue May 19 05:17:41 2009	(r192371)
@@ -1205,7 +1205,6 @@ unlock_and_continue:
 			 * If the process is in a non-running type state,
 			 * don't touch it.  Check all the threads individually.
 			 */
-			PROC_SLOCK(p);
 			breakout = 0;
 			FOREACH_THREAD_IN_PROC(p, td) {
 				thread_lock(td);
@@ -1218,7 +1217,6 @@ unlock_and_continue:
 				}
 				thread_unlock(td);
 			}
-			PROC_SUNLOCK(p);
 			if (breakout) {
 				PROC_UNLOCK(p);
 				continue;
@@ -1248,9 +1246,7 @@ unlock_and_continue:
 		sx_sunlock(&allproc_lock);
 		if (bigproc != NULL) {
 			killproc(bigproc, "out of swap space");
-			PROC_SLOCK(bigproc);
 			sched_nice(bigproc, PRIO_MIN);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200905190517.n4J5HgQD085995>