Date:      Tue, 2 Oct 2012 07:14:22 +0000 (UTC)
From:      Alan Cox <alc@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r241123 - in head/sys/mips: include mips
Message-ID:  <201210020714.q927EMUX090257@svn.freebsd.org>

Author: alc
Date: Tue Oct  2 07:14:22 2012
New Revision: 241123
URL: http://svn.freebsd.org/changeset/base/241123

Log:
  Introduce a new TLB invalidation function for efficiently invalidating
  address ranges, and use this function in pmap_remove().
  
  Tested by:	jchandra
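
As background for the diff below: the new machine-dependent routine
tlb_invalidate_range() is wrapped by a pmap_invalidate_range() helper that
packs its arguments into a small struct and hands them to
pmap_call_on_active_cpus(), so the invalidation runs on every CPU where the
pmap is active.  The following standalone sketch models only that calling
convention; the types and the dispatcher are userland stand-ins, not the
kernel's.

#include <stdio.h>

/* Userland stand-ins for the kernel's vm_offset_t and pmap_t. */
typedef unsigned long vm_offset_t;
typedef struct pmap {
	int	pm_asid;
} *pmap_t;

/* Argument block for the cross-CPU action, mirroring the diff. */
struct pmap_invalidate_range_arg {
	pmap_t		pmap;
	vm_offset_t	sva;
	vm_offset_t	eva;
};

/* Stand-in for the MD tlb_invalidate_range(); just reports its arguments. */
static void
tlb_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	printf("asid %d: invalidate %#lx-%#lx\n", pmap->pm_asid, sva, eva);
}

/* The action run on each CPU where the pmap is active. */
static void
pmap_invalidate_range_action(void *arg)
{
	struct pmap_invalidate_range_arg *p = arg;

	tlb_invalidate_range(p->pmap, p->sva, p->eva);
}

/*
 * Stand-in dispatcher; the kernel's pmap_call_on_active_cpus() would run
 * the action via an IPI on every CPU in the pmap's active set.
 */
static void
call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{

	(void)pmap;
	fn(arg);
}

int
main(void)
{
	struct pmap pm = { .pm_asid = 5 };
	struct pmap_invalidate_range_arg arg = { &pm, 0x10000, 0x14000 };

	call_on_active_cpus(&pm, pmap_invalidate_range_action, &arg);
	return (0);
}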

Modified:
  head/sys/mips/include/tlb.h
  head/sys/mips/mips/pmap.c
  head/sys/mips/mips/tlb.c

Modified: head/sys/mips/include/tlb.h
==============================================================================
--- head/sys/mips/include/tlb.h	Tue Oct  2 06:37:46 2012	(r241122)
+++ head/sys/mips/include/tlb.h	Tue Oct  2 07:14:22 2012	(r241123)
@@ -53,6 +53,7 @@ void tlb_insert_wired(unsigned, vm_offse
 void tlb_invalidate_address(struct pmap *, vm_offset_t);
 void tlb_invalidate_all(void);
 void tlb_invalidate_all_user(struct pmap *);
+void tlb_invalidate_range(struct pmap *, vm_offset_t, vm_offset_t);
 void tlb_save(void);
 void tlb_update(struct pmap *, vm_offset_t, pt_entry_t);
 

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c	Tue Oct  2 06:37:46 2012	(r241122)
+++ head/sys/mips/mips/pmap.c	Tue Oct  2 07:14:22 2012	(r241123)
@@ -190,10 +190,9 @@ static vm_page_t _pmap_allocpte(pmap_t p
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
 static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
 
-#ifdef SMP
 static void pmap_invalidate_page_action(void *arg);
+static void pmap_invalidate_range_action(void *arg);
 static void pmap_update_page_action(void *arg);
-#endif
 
 #ifndef __mips_n64
 /*
@@ -711,6 +710,31 @@ pmap_invalidate_page(pmap_t pmap, vm_off
 	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
 }
 
+struct pmap_invalidate_range_arg {
+	pmap_t pmap;
+	vm_offset_t sva;
+	vm_offset_t eva;
+};
+
+static void
+pmap_invalidate_range_action(void *arg)
+{
+	struct pmap_invalidate_range_arg *p = arg;
+
+	tlb_invalidate_range(p->pmap, p->sva, p->eva);
+}
+
+static void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+	struct pmap_invalidate_range_arg arg;
+
+	arg.pmap = pmap;
+	arg.sva = sva;
+	arg.eva = eva;
+	pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
+}
+
 struct pmap_update_page_arg {
 	pmap_t pmap;
 	vm_offset_t va;
@@ -1737,12 +1761,15 @@ pmap_remove_page(struct pmap *pmap, vm_o
  *	rounded to the page size.
  */
 void
-pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	vm_offset_t va_next;
 	pd_entry_t *pde, *pdpe;
 	pt_entry_t *pte;
+	vm_offset_t va, va_next;
 
+	/*
+	 * Perform an unsynchronized read.  This is, however, safe.
+	 */
 	if (pmap->pm_stats.resident_count == 0)
 		return;
 
@@ -1772,17 +1799,36 @@ pmap_remove(struct pmap *pmap, vm_offset
 			va_next = eva;
 
 		pde = pmap_pdpe_to_pde(pdpe, sva);
-		if (*pde == 0)
+		if (*pde == NULL)
 			continue;
+
+		/*
+		 * Limit our scan to either the end of the va represented
+		 * by the current page table page, or to the end of the
+		 * range being removed.
+		 */
 		if (va_next > eva)
 			va_next = eva;
+
+		va = va_next;
 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 		    sva += PAGE_SIZE) {
-			if (!pte_test(pte, PTE_V))
+			if (!pte_test(pte, PTE_V)) {
+				if (va != va_next) {
+					pmap_invalidate_range(pmap, va, sva);
+					va = va_next;
+				}
 				continue;
-			pmap_remove_pte(pmap, pte, sva, *pde);
-			pmap_invalidate_page(pmap, sva);
+			}
+			if (va == va_next)
+				va = sva;
+			if (pmap_remove_pte(pmap, pte, sva, *pde)) {
+				sva += PAGE_SIZE;
+				break;
+			}
 		}
+		if (va != va_next)
+			pmap_invalidate_range(pmap, va, sva);
 	}
 out:
 	rw_wunlock(&pvh_global_lock);
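
The interesting part of the pmap_remove() change above is the batching: "va"
tracks the start of the current run of removed pages, and one
pmap_invalidate_range() call is issued per contiguous run instead of one
pmap_invalidate_page() per page.  Below is a standalone model of that
coalescing loop; it omits the early break taken when pmap_remove_pte() frees
the whole page table page, and an array of booleans stands in for the
pte_test(pte, PTE_V) check.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL

/* Stand-in: report the range that would go to pmap_invalidate_range(). */
static void
invalidate_range(unsigned long sva, unsigned long eva)
{

	printf("invalidate %#lx-%#lx\n", sva, eva);
}

/*
 * Model of the coalescing loop in pmap_remove(): walk the PTEs for
 * [sva, va_next) and batch consecutive removed pages into one range
 * invalidation.  "valid[i]" stands in for pte_test(pte, PTE_V).
 */
static void
remove_pages(unsigned long sva, const bool *valid, int npages)
{
	unsigned long va, va_next;
	int i;

	va_next = sva + npages * PAGE_SIZE;
	va = va_next;				/* "no run in progress" */
	for (i = 0; i < npages; i++, sva += PAGE_SIZE) {
		if (!valid[i]) {
			if (va != va_next) {	/* close the current run */
				invalidate_range(va, sva);
				va = va_next;
			}
			continue;
		}
		if (va == va_next)		/* open a new run */
			va = sva;
		/* pmap_remove_pte(...) would clear the PTE here. */
	}
	if (va != va_next)			/* close the final run */
		invalidate_range(va, sva);
}

int
main(void)
{
	/* Two runs of valid pages separated by a hole. */
	const bool valid[] = { true, true, false, true, true, true };

	remove_pages(0x20000, valid, 6);
	return (0);
}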

Modified: head/sys/mips/mips/tlb.c
==============================================================================
--- head/sys/mips/mips/tlb.c	Tue Oct  2 06:37:46 2012	(r241122)
+++ head/sys/mips/mips/tlb.c	Tue Oct  2 07:14:22 2012	(r241123)
@@ -35,7 +35,7 @@
 #include <sys/smp.h>
 
 #include <vm/vm.h>
-#include <vm/vm_page.h>
+#include <vm/pmap.h>
 
 #include <machine/pte.h>
 #include <machine/tlb.h>
@@ -187,6 +187,79 @@ tlb_invalidate_all_user(struct pmap *pma
 	intr_restore(s);
 }
 
+/*
+ * Invalidates any TLB entries that map a virtual page from the specified
+ * address range.  If "end" is zero, then every virtual page is considered to
+ * be within the address range's upper bound.
+ */
+void
+tlb_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
+{
+	register_t asid, end_hi, hi, hi_pagemask, s, save_asid, start_hi;
+	int i;
+
+	KASSERT(start < end || (end == 0 && start > 0),
+	    ("tlb_invalidate_range: invalid range"));
+
+	/*
+	 * Truncate the virtual address "start" to an even page frame number,
+	 * and round the virtual address "end" to an even page frame number.
+	 */
+	start &= ~((1 << TLBMASK_SHIFT) - 1);
+	end = (end + (1 << TLBMASK_SHIFT) - 1) & ~((1 << TLBMASK_SHIFT) - 1);
+
+	s = intr_disable();
+	save_asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
+
+	asid = pmap_asid(pmap);
+	start_hi = TLBHI_ENTRY(start, asid);
+	end_hi = TLBHI_ENTRY(end, asid);
+
+	/*
+	 * Select the fastest method for invalidating the TLB entries.
+	 */
+	if (end - start < num_tlbentries << TLBMASK_SHIFT || (end == 0 &&
+	    start >= -(num_tlbentries << TLBMASK_SHIFT))) {
+		/*
+		 * The virtual address range is small compared to the size of
+		 * the TLB.  Probe the TLB for each even numbered page frame
+		 * within the virtual address range.
+		 */
+		for (hi = start_hi; hi != end_hi; hi += 1 << TLBMASK_SHIFT) {
+			mips_wr_pagemask(0);
+			mips_wr_entryhi(hi);
+			tlb_probe();
+			i = mips_rd_index();
+			if (i >= 0)
+				tlb_invalidate_one(i);
+		}
+	} else {
+		/*
+		 * The virtual address range is large compared to the size of
+		 * the TLB.  Test every non-wired TLB entry.
+		 */
+		for (i = mips_rd_wired(); i < num_tlbentries; i++) {
+			mips_wr_index(i);
+			tlb_read();
+			hi = mips_rd_entryhi();
+			if ((hi & TLBHI_ASID_MASK) == asid && (hi < end_hi ||
+			    end == 0)) {
+				/*
+				 * If "hi" is a large page that spans
+				 * "start_hi", then it must be invalidated.
+				 */
+				hi_pagemask = mips_rd_pagemask();
+				if (hi >= (start_hi & ~(hi_pagemask <<
+				    TLBMASK_SHIFT)))
+					tlb_invalidate_one(i);
+			}
+		}
+	}
+
+	mips_wr_entryhi(save_asid);
+	intr_restore(s);
+}
+
 /* XXX Only if DDB?  */
 void
 tlb_save(void)
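
tlb_invalidate_range() above picks between two strategies: when the range
covers less address space than the whole TLB can map (each entry maps an
even/odd page pair), it probes the TLB for each page pair in the range;
otherwise it reads back every non-wired entry and invalidates those whose
EntryHi carries the pmap's ASID and falls within the range, with an "end" of
zero meaning the top of the address space.  The standalone sketch below
models only the strategy selection; TLBMASK_SHIFT and num_tlbentries are
placeholder values here, not the real per-CPU ones.

#include <stdio.h>

typedef unsigned long vm_offset_t;

/* Placeholder values; the real ones come from the CPU at boot. */
#define TLBMASK_SHIFT	13	/* each TLB entry maps an even/odd page pair */
static int num_tlbentries = 64;

/*
 * Model of the strategy choice in tlb_invalidate_range(): probe per page
 * pair when the range is small compared to the TLB, otherwise scan every
 * non-wired entry.  An "end" of zero means the range runs to the top of
 * the address space, which the second test handles via unsigned wrap.
 */
static const char *
pick_strategy(vm_offset_t start, vm_offset_t end)
{

	if (end - start < (vm_offset_t)num_tlbentries << TLBMASK_SHIFT ||
	    (end == 0 &&
	    start >= -((vm_offset_t)num_tlbentries << TLBMASK_SHIFT)))
		return ("probe each even page frame in the range");
	return ("scan all non-wired TLB entries");
}

int
main(void)
{

	printf("small range: %s\n", pick_strategy(0x10000, 0x30000));
	printf("large range: %s\n", pick_strategy(0x10000, 0x4000000));
	printf("wrap to top: %s\n", pick_strategy(-0x20000UL, 0));
	return (0);
}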


