Date:      Thu, 17 Nov 2011 13:14:59 +0000 (UTC)
From:      "Jayachandran C." <jchandra@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r227623 - head/sys/mips/mips
Message-ID:  <201111171314.pAHDEx9n008822@svn.freebsd.org>

Author: jchandra
Date: Thu Nov 17 13:14:59 2011
New Revision: 227623
URL: http://svn.freebsd.org/changeset/base/227623

Log:
  Do pmap update only on active CPUs.
  
  The pmap update_page/invalidate_page/invalidate_all operations have to
  be performed only on active CPUs. In the simplest case, if the process
  is not active on any other CPU, we can just do the operation on the
  current CPU.
  
  This change replaces the call to smp_rendezvous() for these operations
  with smp_rendezvous_cpus() when there is more than one active CPU, or
  with a direct function call when the current CPU is the only active one
  (a condensed sketch of the new helper follows the log message).
  
  This change gives a significant performance increase in fork/exec
  benchmarks on XLR/XLS/XLP with 32 CPUs.
  
  Reviewed by:	alc
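
  For reference, a condensed sketch of the SMP dispatch logic that the
  diff below introduces as pmap_call_on_active_cpus(). This is a
  paraphrase of the patch, not independent code; all names come from the
  patch itself, and the ASID bookkeeping is left out here:

	static __inline void
	pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
	{
		cpuset_t active_cpus;
		int cpuid, self;

		sched_pin();			/* keep cpuid stable */
		if (is_kernel_pmap(pmap)) {
			/* The kernel pmap is active on every CPU. */
			smp_rendezvous(NULL, fn, NULL, arg);
			goto out;
		}
		cpuid = PCPU_GET(cpuid);
		active_cpus = pmap->pm_active;	/* snapshot of the active set */
		self = CPU_ISSET(cpuid, &active_cpus);
		CPU_CLR(cpuid, &active_cpus);
		if (CPU_EMPTY(&active_cpus)) {
			/* This CPU is (at most) the only active one: call directly. */
			if (self)
				fn(arg);
		} else {
			if (self)
				CPU_SET(cpuid, &active_cpus);
			smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
		}
	out:
		sched_unpin();
	}

  The full version in the patch also resets pm_asid[cpu].gen on the
  inactive CPUs before taking the snapshot, and the !SMP variant simply
  calls fn() directly when the pmap is active on the current CPU.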

Modified:
  head/sys/mips/mips/pmap.c

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Thu Nov 17 13:14:07 2011	(r227622)
+++ head/sys/mips/mips/pmap.c	Thu Nov 17 13:14:59 2011	(r227623)
@@ -181,7 +181,6 @@ static pt_entry_t init_pte_prot(vm_offse
 
 #ifdef SMP
 static void pmap_invalidate_page_action(void *arg);
-static void pmap_invalidate_all_action(void *arg);
 static void pmap_update_page_action(void *arg);
 #endif
 
@@ -622,119 +621,94 @@ pmap_init(void)
  * Low level helper routines.....
  ***************************************************/
 
+#ifdef	SMP
 static __inline void
-pmap_invalidate_all_local(pmap_t pmap)
+pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
 {
-	u_int cpuid;
+	int	cpuid, cpu, self;
+	cpuset_t active_cpus;
 
+	sched_pin();
+	if (is_kernel_pmap(pmap)) {
+		smp_rendezvous(NULL, fn, NULL, arg);
+		goto out;
+	}
+	/* Force ASID update on inactive CPUs */
+	CPU_FOREACH(cpu) {
+		if (!CPU_ISSET(cpu, &pmap->pm_active))
+			pmap->pm_asid[cpu].gen = 0;
+	}
 	cpuid = PCPU_GET(cpuid);
+	/* 
+	 * XXX: barrier/locking for active? 
+	 *
+	 * Take a snapshot of active here, any further changes are ignored.
+	 * tlb update/invalidate should be harmless on inactive CPUs
+	 */
+	active_cpus = pmap->pm_active;
+	self = CPU_ISSET(cpuid, &active_cpus);
+	CPU_CLR(cpuid, &active_cpus);
+	/* Optimize for the case where this cpu is the only active one */
+	if (CPU_EMPTY(&active_cpus)) {
+		if (self)
+			fn(arg);
+	} else {
+		if (self)
+			CPU_SET(cpuid, &active_cpus);
+		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
+	}
+out:
+	sched_unpin();
+}
+#else /* !SMP */
+static __inline void
+pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
+{
+	int	cpuid;
 
-	if (pmap == kernel_pmap) {
-		tlb_invalidate_all();
+	if (is_kernel_pmap(pmap)) {
+		fn(arg);
 		return;
 	}
-	if (CPU_ISSET(cpuid, &pmap->pm_active))
-		tlb_invalidate_all_user(pmap);
-	else
+	cpuid = PCPU_GET(cpuid);
+	if (!CPU_ISSET(cpuid, &pmap->pm_active))
 		pmap->pm_asid[cpuid].gen = 0;
+	else
+		fn(arg);
 }
+#endif /* SMP */
 
-#ifdef SMP
 static void
 pmap_invalidate_all(pmap_t pmap)
 {
 
-	smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap);
+	pmap_call_on_active_cpus(pmap,
+	    (void (*)(void *))tlb_invalidate_all_user, pmap);
 }
 
-static void
-pmap_invalidate_all_action(void *arg)
-{
-
-	pmap_invalidate_all_local((pmap_t)arg);
-}
-#else
-static void
-pmap_invalidate_all(pmap_t pmap)
-{
-
-	pmap_invalidate_all_local(pmap);
-}
-#endif
-
-static __inline void
-pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
-{
-	u_int cpuid;
-
-	cpuid = PCPU_GET(cpuid);
-
-	if (is_kernel_pmap(pmap)) {
-		tlb_invalidate_address(pmap, va);
-		return;
-	}
-	if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
-		return;
-	else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
-		pmap->pm_asid[cpuid].gen = 0;
-		return;
-	}
-	tlb_invalidate_address(pmap, va);
-}
-
-#ifdef SMP
 struct pmap_invalidate_page_arg {
 	pmap_t pmap;
 	vm_offset_t va;
 };
 
 static void
-pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
-{
-	struct pmap_invalidate_page_arg arg;
-
-	arg.pmap = pmap;
-	arg.va = va;
-	smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg);
-}
-
-static void
 pmap_invalidate_page_action(void *arg)
 {
 	struct pmap_invalidate_page_arg *p = arg;
 
-	pmap_invalidate_page_local(p->pmap, p->va);
+	tlb_invalidate_address(p->pmap, p->va);
 }
-#else
+
 static void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 {
+	struct pmap_invalidate_page_arg arg;
 
-	pmap_invalidate_page_local(pmap, va);
-}
-#endif
-
-static __inline void
-pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
-{
-	u_int cpuid;
-
-	cpuid = PCPU_GET(cpuid);
-
-	if (is_kernel_pmap(pmap)) {
-		tlb_update(pmap, va, pte);
-		return;
-	}
-	if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
-		return;
-	else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
-		pmap->pm_asid[cpuid].gen = 0;
-		return;
-	}
-	tlb_update(pmap, va, pte);
+	arg.pmap = pmap;
+	arg.va = va;
+	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
 }
 
-#ifdef SMP
 struct pmap_update_page_arg {
 	pmap_t pmap;
 	vm_offset_t va;
@@ -742,31 +716,23 @@ struct pmap_update_page_arg {
 };
 
 static void
-pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
-{
-	struct pmap_update_page_arg arg;
-
-	arg.pmap = pmap;
-	arg.va = va;
-	arg.pte = pte;
-	smp_rendezvous(0, pmap_update_page_action, 0, &arg);
-}
-
-static void
 pmap_update_page_action(void *arg)
 {
 	struct pmap_update_page_arg *p = arg;
 
-	pmap_update_page_local(p->pmap, p->va, p->pte);
+	tlb_update(p->pmap, p->va, p->pte);
 }
-#else
+
 static void
 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
 {
+	struct pmap_update_page_arg arg;
 
-	pmap_update_page_local(pmap, va, pte);
+	arg.pmap = pmap;
+	arg.va = va;
+	arg.pte = pte;
+	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
 }
-#endif
 
 /*
  *	Routine:	pmap_extract
@@ -3213,7 +3179,7 @@ pmap_emulate_modified(pmap_t pmap, vm_of
 #ifdef SMP
 	/* It is possible that some other CPU changed m-bit */
 	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
-		pmap_update_page_local(pmap, va, *pte);
+		tlb_update(pmap, va, *pte);
 		PMAP_UNLOCK(pmap);
 		return (0);
 	}
@@ -3227,7 +3193,7 @@ pmap_emulate_modified(pmap_t pmap, vm_of
 		return (1);
 	}
 	pte_set(pte, PTE_D);
-	pmap_update_page_local(pmap, va, *pte);
+	tlb_update(pmap, va, *pte);
 	pa = TLBLO_PTE_TO_PA(*pte);
 	if (!page_is_managed(pa))
 		panic("pmap_emulate_modified: unmanaged page");


