Date:      Wed, 21 Jul 2004 20:30:32 GMT
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 57869 for review
Message-ID:  <200407212030.i6LKUWrH070264@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=57869

Change 57869 by julian@julian_desk on 2004/07/21 20:29:40

	Learn to spell "concurrency"

Affected files ...

.. //depot/projects/nsched/sys/kern/kern_exec.c#8 edit
.. //depot/projects/nsched/sys/kern/kern_exit.c#15 edit
.. //depot/projects/nsched/sys/kern/kern_kse.c#19 edit
.. //depot/projects/nsched/sys/kern/kern_thr.c#11 edit
.. //depot/projects/nsched/sys/kern/kern_thread.c#29 edit
.. //depot/projects/nsched/sys/kern/sched_4bsd.c#28 edit
.. //depot/projects/nsched/sys/kern/sched_ule.c#16 edit

Differences ...

==== //depot/projects/nsched/sys/kern/kern_exec.c#8 (text+ko) ====

@@ -268,7 +268,7 @@
 		 * upcalls and kses we may have picked up along the way.
 		 */
 		mtx_lock_spin(&sched_lock);
-		sched_set_concurrancy(td->td_ksegrp, 1);
+		sched_set_concurrency(td->td_ksegrp, 1);
 		upcall_remove(td);
 		mtx_unlock_spin(&sched_lock);
 		p->p_flag &= ~(P_SA|P_HADTHREADS);

==== //depot/projects/nsched/sys/kern/kern_exit.c#15 (text+ko) ====

@@ -168,7 +168,7 @@
 		 * Turn off threading support.
 		 */
 		mtx_lock_spin(&sched_lock);
-		sched_set_concurrancy(td->td_ksegrp, 1);
+		sched_set_concurrency(td->td_ksegrp, 1);
 		upcall_remove(td);
 		mtx_unlock_spin(&sched_lock);
 		p->p_flag &= ~(P_SA|P_HADTHREADS);

==== //depot/projects/nsched/sys/kern/kern_kse.c#19 (text+ko) ====

@@ -313,7 +313,7 @@
 	 * would have been discarded in previous calls to thread_exit().
 	 * Effectively we have left threading mode..
 	 * The only real thing left to do is ensure that the 
-	 * scheduler sets out concurrancy back to 1 as that may be a 
+	 * scheduler sets out concurrency back to 1 as that may be a 
 	 * resource leak otherwise.
 	 * This is an A[PB]I issue.. what SHOULD we do?
 	 * One possibility is to return to the user. It may not cope well.
@@ -321,7 +321,7 @@
 	 */
 	p->p_flag &= ~P_SA;
 	mtx_unlock_spin(&sched_lock);
-	sched_set_concurrancy(td->td_ksegrp, 1);
+	sched_set_concurrency(td->td_ksegrp, 1);
 	PROC_UNLOCK(p);
 #if 1
 	return (0);
@@ -552,7 +552,7 @@
 		/*
 		 * Initialize KSE group
 		 *
-		 * For multiplxed group, set concurrancy equal to physical
+		 * For multiplxed group, set concurrency equal to physical
 		 * cpus. This increases concurrent even if userland
 		 * is not MP safe and can only run on single CPU.
 		 * In ideal world, every physical cpu should execute a thread.
@@ -567,7 +567,7 @@
 		 * kind of group will never schedule an upcall when blocked,
 		 * this intends to simulate pthread system scope thread.
 		 */
-		sched_set_concurrancy(newkg, ncpus);
+		sched_set_concurrency(newkg, ncpus);
 	}
 	newku = upcall_alloc();
 	newku->ku_mailbox = uap->mbx;
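
For context, a minimal sketch of the call pattern these kern_kse.c hunks and
the kern_thr.c hunk below converge on; it is not code from this change and
uses only identifiers visible in the hunks, so anything else is illustrative.
A multiplexed ksegrp asks the scheduler for one slot per CPU (subject to the
virtual_cpu override), while reverting to non-threaded mode asks for a single
slot again.

	int ncpus;

	/* Entering threaded mode: one slot per CPU unless overridden. */
	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	sched_set_concurrency(td->td_ksegrp, ncpus);

	/* Reverting to non-threaded mode (exec/exit): back to one slot. */
	mtx_lock_spin(&sched_lock);
	sched_set_concurrency(td->td_ksegrp, 1);
	upcall_remove(td);
	mtx_unlock_spin(&sched_lock);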

==== //depot/projects/nsched/sys/kern/kern_thr.c#11 (text+ko) ====

@@ -151,7 +151,7 @@
 		ncpus = mp_ncpus;
 		if (virtual_cpu != 0)
 			ncpus = virtual_cpu;
-		sched_set_concurrancy(td->td_ksegrp, ncpus);
+		sched_set_concurrency(td->td_ksegrp, ncpus);
 		td->td_proc->p_flag |= P_HADTHREADS;
 	}
 	PROC_UNLOCK(td->td_proc);

==== //depot/projects/nsched/sys/kern/kern_thread.c#29 (text+ko) ====

@@ -603,7 +603,7 @@
 			td->td_ksegrp	= NULL;
 			PCPU_SET(deadthread, td);
 		} else {
-			sched_set_concurrancy(kg, 1);
+			sched_set_concurrency(kg, 1);
 		}
 	}
 	td->td_state	= TDS_INACTIVE;

==== //depot/projects/nsched/sys/kern/sched_4bsd.c#28 (text+ko) ====

@@ -116,12 +116,12 @@
 					   /* to the system scheduler */
 	int		skg_runq_threads;	/* (j) Num KSEs on runq. */
 	int		skg_avail_opennings;	/* (j) Num KSEs on iq. */
-	int		skg_concurrancy; /* (j) desired concurrancy */
+	int		skg_concurrency; /* (j) desired concurrency */
 }; 
 #define	kg_last_assigned 	kg_sched->skg_last_assigned
 #define	kg_runq_threads		kg_sched->skg_runq_threads
 #define	kg_avail_opennings	kg_sched->skg_avail_opennings
-#define	kg_concurrancy		kg_sched->skg_concurrancy
+#define	kg_concurrency		kg_sched->skg_concurrency
 
 
 /****************************************************************
@@ -813,7 +813,7 @@
 	newstd->std_state = STDS_THREAD;
 	newstd->std_cpticks = 0;
 
-	newtd->td_ksegrp->kg_concurrancy = 1;
+	newtd->td_ksegrp->kg_concurrency = 1;
 	/* non threaded process. ignore thread fairness stuff */
 	newtd->td_ksegrp->kg_avail_opennings = 1; 
 	/* Our child inherrits our estimated cpu requirement */
@@ -829,7 +829,7 @@
 void
 sched_fork_ksegrp(struct thread *td, struct ksegrp *newkg)
 {
-	newkg->kg_concurrancy = 1;
+	newkg->kg_concurrency = 1;
 	newkg->kg_avail_opennings = 1;
 	newkg->kg_estcpu = td->td_ksegrp->kg_estcpu;
 }
@@ -1215,7 +1215,7 @@
 	 */
 	td_sched0.std_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
 	td_sched0.std_state = STDS_THREAD;
-	kg_sched0.skg_concurrancy = 1;
+	kg_sched0.skg_concurrency = 1;
 	kg_sched0.skg_avail_opennings = 0; /* we are already running */
 
 }
@@ -1283,7 +1283,7 @@
 
 	kg->kg_runq_threads  = 0; /* XXXKSE change name */
 	kg->kg_avail_opennings  = 1;
-	kg->kg_concurrancy = 1;
+	kg->kg_concurrency = 1;
 }
 
 /*
@@ -1316,13 +1316,13 @@
 
 	kg = FIRST_KSEGRP_IN_PROC(p);
 	
-	KASSERT((kg->kg_concurrancy == 1), ("Cached proc with > 1 opennings "));
+	KASSERT((kg->kg_concurrency == 1), ("Cached proc with > 1 opennings "));
 
 }
 
 /*
  * (Re) assign resources to allow the ksegrp to implement
- * teh requested concurrancy. At this time it means allocating
+ * teh requested concurrency. At this time it means allocating
  * or freeing KSE structures.
  * Called from:
  *  kern_execve()  (reverting to non threaded)
@@ -1333,15 +1333,15 @@
  *  kse_create() (increasing)
  */
 void
-sched_set_concurrancy(struct ksegrp *kg, int concurrancy)
+sched_set_concurrency(struct ksegrp *kg, int concurrency)
 {
 	struct kg_sched *skg;
 
 	skg = kg->kg_sched;
 	mtx_lock_spin(&sched_lock);
-	if (concurrancy > skg->skg_concurrancy) {
-		skg->skg_concurrancy = concurrancy;
-		skg->skg_avail_opennings += concurrancy - skg->skg_concurrancy;
+	if (concurrency > skg->skg_concurrency) {
+		skg->skg_concurrency = concurrency;
+		skg->skg_avail_opennings += concurrency - skg->skg_concurrency;
 		slot_fill(kg);
 	} else {
 		/*
@@ -1349,8 +1349,8 @@
 		 * they'll just get used up as they run.
 		 * XXXKSE Make this a true statement..
 		 */
-		skg->skg_concurrancy = concurrancy;
-		skg->skg_avail_opennings += concurrancy - skg->skg_concurrancy;
+		skg->skg_concurrency = concurrency;
+		skg->skg_avail_opennings += concurrency - skg->skg_concurrency;
 		if (skg->skg_avail_opennings < 0)
 			skg->skg_avail_opennings = 0;
 	}
@@ -1562,7 +1562,7 @@
  * Given a surplus system slot, try assign a new runnable thread to it.
  * Called from:
  *  sched_thread_exit()  (local)
- *  sched_set_concurrancy()  (local)
+ *  sched_set_concurrency()  (local)
  *  sched_switch()  (local)
  *  sched_thread_exit()  (local)
  *  remrunqueue()  (local) (commented out)
@@ -1697,7 +1697,7 @@
 
 /*
  * Decide whether to pass this newly runnable thread on to the 
- * system scheduler. This basically implements the concurrancy
+ * system scheduler. This basically implements the concurrency
  * limit for a KSE. (e.g. "threading fairness")
  * Called from:
  *  kick_init()
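
For reference, a minimal sketch of the 4BSD-side adjustment in the
sched_set_concurrency() hunk above; it is not the committed code.  In the
hunk as shown, the "concurrency - skg->skg_concurrency" term is evaluated
after skg_concurrency has already been set to the new value on the preceding
line, so this sketch computes the delta first.  Only fields and helpers
visible in the hunks are used; everything else is illustrative.

	struct kg_sched *skg = kg->kg_sched;
	int delta;

	mtx_lock_spin(&sched_lock);
	delta = concurrency - skg->skg_concurrency;	/* delta before overwrite */
	skg->skg_concurrency = concurrency;
	if (delta > 0) {
		/* More slots requested: open them and try to fill them now. */
		skg->skg_avail_opennings += delta;
		slot_fill(kg);
	} else {
		/* Fewer slots: running ones are not revoked, they drain as threads run. */
		skg->skg_avail_opennings += delta;
		if (skg->skg_avail_opennings < 0)
			skg->skg_avail_opennings = 0;
	}
	mtx_unlock_spin(&sched_lock);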

==== //depot/projects/nsched/sys/kern/sched_ule.c#16 (text+ko) ====

@@ -285,7 +285,7 @@
 	int	skg_runq_kses;		/* (j) Num KSEs on runq. */
 	int	skg_idle_kses;		/* (j) Num KSEs on iq. */
 	int	skg_kses;		/* (j) Num KSEs in group. */
-	int	skg_concurrancy;	/* (j) desired concurrancy */
+	int	skg_concurrency;	/* (j) desired concurrency */
 
 };
 #define	kg_slptime	kg_sched->skg_slptime
@@ -2195,7 +2195,7 @@
  *  sched_newproc()  (local)
  *  sched_thr_newthread()  (local)
  *  schedinit()  (local)
- *  sched_set_concurrancy()  (local)
+ *  sched_set_concurrency()  (local)
  *  
  */
 static void
@@ -2275,7 +2275,7 @@
  * Called from:
  *  sched_destroyproc()
  *  sched_thr_exit()
- *  sched_set_concurrancy() via REDUCE_KSES()
+ *  sched_set_concurrency() via REDUCE_KSES()
  *  kse_reassign() via REDUCE_KSES()
  */
 static void
@@ -2299,13 +2299,13 @@
 }
 
 /* 
- * Whenever we have idle KSEs and there are too many for the concurrancy,
+ * Whenever we have idle KSEs and there are too many for the concurrency,
  * then free as many as we can. Don't free too many if we have threads
  * to run/kill.
  */
 #define REDUCE_KSES(kg, skg) 					\
 do {								\
-	while ((skg->skg_concurrancy < skg->skg_kses) &&	\
+	while ((skg->skg_concurrency < skg->skg_kses) &&	\
     	    (skg->skg_idle_kses > 0) &&				\
 	    (skg->skg_kses > kg->kg_numthreads)) {			\
 		kse_unlink(TAILQ_FIRST(&skg->skg_iq));		\
@@ -2338,7 +2338,7 @@
 
 /*
  * (Re) assign resources to allow the ksegrp to implement
- * teh requested concurrancy. At this time it means allocating
+ * teh requested concurrency. At this time it means allocating
  * or freeing KSE structures.
  * Called from:
  *  kern_execve()  (reverting to non threaded)
@@ -2349,15 +2349,15 @@
  *  kse_create() (increasing)
  */
 void
-sched_set_concurrancy(struct ksegrp *kg, int concurrancy)
+sched_set_concurrency(struct ksegrp *kg, int concurrency)
 {
 	struct kse *newke;
 	struct kg_sched *skg;
 
 	skg = kg->kg_sched;
-	skg->skg_concurrancy = concurrancy;
+	skg->skg_concurrency = concurrency;
 	REDUCE_KSES(kg, skg);
-	while (skg->skg_kses < skg->skg_concurrancy) {
+	while (skg->skg_kses < skg->skg_concurrency) {
 		newke = kse_alloc();
 		bzero(&newke->ke_startzero, RANGEOF(struct kse,
 		      ke_startzero, ke_endzero));
@@ -2470,7 +2470,7 @@
  * Assumes that the original thread is not runnable.
  * Called from:
  *  sched_thread_exit()  (local)
- *  sched_set_concurrancy()  (local)
+ *  sched_set_concurrency()  (local)
  *  sched_switch()  (local)
  *  sched_thread_exit()  (local)
  *  remrunqueue()  (local) (commented out)
@@ -2618,7 +2618,7 @@
 
 /*
  * Decide whether to pass this newly runnable thread on to the 
- * system scheduler. This basically implements the concurrancy
+ * system scheduler. This basically implements the concurrency
  * limit for a KSE. (e.g. "threading fairness")
  * Called from:
  *  kick_init()
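
And the ULE-side equivalent, again as a minimal sketch rather than the
committed code: shrink by unlinking idle KSEs under the same three conditions
REDUCE_KSES tests, then allocate fresh KSEs until the group holds one per
unit of requested concurrency.  The step that links a newly allocated KSE
into the group falls outside the hunk above and is left as a comment.

	struct kg_sched *skg = kg->kg_sched;
	struct kse *newke;

	skg->skg_concurrency = concurrency;

	/* Too many KSEs: drop idle ones, but never below the thread count. */
	while (skg->skg_concurrency < skg->skg_kses &&
	    skg->skg_idle_kses > 0 &&
	    skg->skg_kses > kg->kg_numthreads)
		kse_unlink(TAILQ_FIRST(&skg->skg_iq));

	/* Too few KSEs: allocate until the requested concurrency is reached. */
	while (skg->skg_kses < skg->skg_concurrency) {
		newke = kse_alloc();
		bzero(&newke->ke_startzero,
		    RANGEOF(struct kse, ke_startzero, ke_endzero));
		/* ... link newke into kg and bump skg_kses (not shown above) ... */
	}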


