Date:      Sun, 14 Feb 2010 16:24:10 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r203881 - projects/ppc64/sys/powerpc/aim
Message-ID:  <201002141624.o1EGOA2u022357@svn.freebsd.org>

Author: nwhitehorn
Date: Sun Feb 14 16:24:10 2010
New Revision: 203881
URL: http://svn.freebsd.org/changeset/base/203881

Log:
  Revisit the page table synchronization requirements while trying to track
  down a memory corruption bug during page-out/page-in. This does not fix
  that bug, but it does give both a performance and a correctness improvement.
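
  The gist of the moea64_pte_set() change below: the PTE low word is filled
  in first, an eieio orders that store against the store of the high word
  (which carries the valid bit), and the trailing barrier becomes ptesync
  rather than a plain sync so the update is guaranteed visible to the
  hardware page-table walker. A minimal, hypothetical sketch of that
  ordering (the structure layout is simplified and pte_install() is not a
  function in the real file):

      /*
       * Hypothetical sketch of the PTE-install ordering. PowerPC-only,
       * since the barriers are real instructions; builds with GCC's
       * __asm/__volatile keywords and no kernel headers.
       */
      #include <stdint.h>

      #define EIEIO()         __asm __volatile("eieio")
      #define PTESYNC()       __asm __volatile("ptesync")

      struct lpte_sketch {
              volatile uint64_t pte_hi;   /* VSID/API half; holds the valid bit */
              volatile uint64_t pte_lo;   /* RPN, WIMG and protection bits */
      };

      static void
      pte_install(struct lpte_sketch *pt, uint64_t hi, uint64_t lo)
      {
              pt->pte_lo = lo;    /* fill in the low word while still invalid */
              EIEIO();            /* order it before the high-word store */
              pt->pte_hi = hi;    /* the high word makes the entry valid */
              PTESYNC();          /* ptesync, not plain sync: ensure the
                                     table walker sees a complete entry */
      }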

Modified:
  projects/ppc64/sys/powerpc/aim/mmu_oea64.c

Modified: projects/ppc64/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Sun Feb 14 16:21:00 2010	(r203880)
+++ projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Sun Feb 14 16:24:10 2010	(r203881)
@@ -168,6 +168,7 @@ cntlzd(volatile register_t a) {
 	return b;
 }
 
+#define	PTESYNC()	__asm __volatile("ptesync");
 #define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
 #define	SYNC()		__asm __volatile("sync");
 #define	EIEIO()		__asm __volatile("eieio");
@@ -196,6 +197,7 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 	vpn = (uint64_t)(va & ADDR_PIDX);
 	if (pmap != NULL)
 		vpn |= (va_to_vsid(pmap,va) << 28);
+	vpn &= ~(0xffffULL << 48);
 
 	mtx_lock_spin(&tlbie_mutex);
 #ifdef __powerpc64__
@@ -205,15 +207,13 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 	    eieio; \
 	    tlbsync; \
 	    ptesync;" 
-	:: "r"(vpn));
+	:: "r"(vpn) : "memory");
 #else
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
 
 	__asm __volatile("\
 	    mfmsr %0; \
-	    clrldi %1,%0,49; \
-	    mtmsr %1; \
 	    insrdi %1,%5,1,0; \
 	    mtmsrd %1; \
 	    ptesync; \
@@ -226,7 +226,8 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 	    eieio; \
 	    tlbsync; \
 	    ptesync;" 
-	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1));
+	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
+	    : "memory");
 #endif
 	mtx_unlock_spin(&tlbie_mutex);
 }
@@ -237,13 +238,13 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 
-#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
-#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
-#define	PVO_WIRED		0x010		/* PVO entry is wired */
-#define	PVO_MANAGED		0x020		/* PVO entry is managed */
-#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
+#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
+#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
+#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
+#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
+#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
 						   bootstrap */
-#define PVO_FAKE		0x100		/* fictitious phys page */
+#define PVO_FAKE		0x100UL		/* fictitious phys page */
 #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
 #define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
 #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
@@ -533,23 +534,6 @@ moea64_attr_save(vm_page_t m, u_int64_t 
 	m->md.mdpg_attrs |= ptebit;
 }
 
-static __inline int
-moea64_pte_compare(const struct lpte *pt, const struct lpte *pvo_pt)
-{
-	if (pt->pte_hi == pvo_pt->pte_hi)
-		return (1);
-
-	return (0);
-}
-
-static __inline int
-moea64_pte_match(struct lpte *pt, uint64_t vsid, vm_offset_t va, int which)
-{
-	return (pt->pte_hi & ~LPTE_VALID) ==
-	    ((vsid << LPTE_VSID_SHIFT) |
-	    ((uint64_t)(va >> ADDR_API_SHFT64) & LPTE_API) | which);
-}
-
 static __inline void
 moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 
     uint64_t pte_lo)
@@ -604,7 +588,7 @@ moea64_pte_set(struct lpte *pt, struct l
 	pt->pte_lo = pvo_pt->pte_lo;
 	EIEIO();
 	pt->pte_hi = pvo_pt->pte_hi;
-	SYNC();
+	PTESYNC();
 	moea64_pte_valid++;
 }
 
@@ -623,7 +607,6 @@ moea64_pte_unset(struct lpte *pt, struct
 	 * Invalidate the pte.
 	 */
 	pt->pte_hi &= ~LPTE_VALID;
-
 	TLBIE(pmap,va);
 
 	/*
@@ -642,6 +625,8 @@ moea64_pte_change(struct lpte *pt, struc
 	 */
 	moea64_pte_unset(pt, pvo_pt, pmap, va);
 	moea64_pte_set(pt, pvo_pt);
+	if (pmap == kernel_pmap)
+		isync();
 }
 
 static __inline uint64_t
@@ -744,7 +729,7 @@ moea64_bridge_cpu_bootstrap(mmu_t mmup, 
 	 * Install page table
 	 */
 
-	__asm __volatile ("sync; mtsdr1 %0; isync"
+	__asm __volatile ("ptesync; mtsdr1 %0; isync"
 	    :: "r"((uintptr_t)moea64_pteg_table 
 		     | (64 - cntlzd(moea64_pteg_mask >> 11))));
 	tlbia();
@@ -1243,7 +1228,7 @@ void moea64_set_scratchpage_pa(int which
 	EIEIO();
 
 	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
-	TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
+	PTESYNC(); isync();
 }
 
 void
@@ -1263,8 +1248,6 @@ moea64_copy_page(mmu_t mmu, vm_page_t ms
 	kcopy((void *)moea64_scratchpage_va[0], 
 	    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
 
-	__syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE);
-
 	mtx_unlock(&moea64_scratchpage_mtx);
 }
 
@@ -1282,8 +1265,6 @@ moea64_zero_page_area(mmu_t mmu, vm_page
 
 	moea64_set_scratchpage_pa(0,pa);
 	bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
-	__syncicache((void *)moea64_scratchpage_va[0],PAGE_SIZE);
-
 	mtx_unlock(&moea64_scratchpage_mtx);
 }
 
@@ -1374,9 +1355,6 @@ moea64_enter_locked(pmap_t pmap, vm_offs
 	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
 	    pte_lo, pvo_flags);
 
-	if (pmap == kernel_pmap)
-		TLBIE(pmap, va);
-
 	/*
 	 * Flush the page from the instruction cache if this page is
 	 * mapped executable and cacheable.
@@ -1549,8 +1527,6 @@ moea64_uma_page_alloc(uma_zone_t zone, i
 	    &moea64_pvo_kunmanaged, va,  VM_PAGE_TO_PHYS(m), LPTE_M, 
 	    PVO_WIRED | PVO_BOOTSTRAP);
 
-	TLBIE(kernel_pmap, va);
-
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 	
@@ -1639,7 +1615,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t
 				lo |= pvo->pvo_pte.lpte.pte_lo;
 				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
 				moea64_pte_change(pt, &pvo->pvo_pte.lpte,
-				    pvo->pvo_pmap, pvo->pvo_vaddr);
+				    pvo->pvo_pmap, PVO_VADDR(pvo));
 			}
 			UNLOCK_TABLE();
 		}
@@ -1699,8 +1675,6 @@ moea64_kenter(mmu_t mmu, vm_offset_t va,
 	    &moea64_pvo_kunmanaged, va, pa, pte_lo, 
 	    PVO_WIRED | VM_PROT_EXECUTE);
 
-	TLBIE(kernel_pmap, va);
-
 	if (error != 0 && error != ENOENT)
 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
 		    pa, error);
@@ -1959,7 +1933,7 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 		 */
 		if (pt != NULL) {
 			moea64_pte_change(pt, &pvo->pvo_pte.lpte, 
-			    pvo->pvo_pmap, pvo->pvo_vaddr);
+			    pvo->pvo_pmap, PVO_VADDR(pvo));
 			if ((pvo->pvo_pte.lpte.pte_lo & 
 			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
 				moea64_syncicache(pm, sva,
@@ -2197,7 +2171,6 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 	 */
 	va &= ~ADDR_POFF;
 	vsid = va_to_vsid(pm, va);
-
 	ptegidx = va_to_pteg(vsid, va);
 
 	/*
@@ -2276,7 +2249,7 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 		first = 1;
 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
 
-	if (pvo->pvo_pte.lpte.pte_lo & PVO_WIRED)
+	if (pvo->pvo_vaddr & PVO_WIRED)
 		pm->pm_stats.wired_count++;
 	pm->pm_stats.resident_count++;
 
@@ -2291,6 +2264,9 @@ moea64_pvo_enter(pmap_t pm, uma_zone_t z
 		moea64_pte_overflow++;
 	}
 
+	if (pm == kernel_pmap)
+		isync();
+
 	UNLOCK_TABLE();
 
 	return (first ? ENOENT : 0);
@@ -2309,7 +2285,7 @@ moea64_pvo_remove(struct pvo_entry *pvo,
 	pt = moea64_pvo_to_pte(pvo, pteidx);
 	if (pt != NULL) {
 		moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
-		    pvo->pvo_vaddr);
+		    PVO_VADDR(pvo));
 		PVO_PTEGIDX_CLR(pvo);
 	} else {
 		moea64_pte_overflow--;
@@ -2320,7 +2296,7 @@ moea64_pvo_remove(struct pvo_entry *pvo,
 	 * Update our statistics.
 	 */
 	pvo->pvo_pmap->pm_stats.resident_count--;
-	if (pvo->pvo_pte.lpte.pte_lo & PVO_WIRED)
+	if (pvo->pvo_vaddr & PVO_WIRED)
 		pvo->pvo_pmap->pm_stats.wired_count--;
 
 	/*
@@ -2347,7 +2323,7 @@ moea64_pvo_remove(struct pvo_entry *pvo,
 	 */
 	LIST_REMOVE(pvo, pvo_olink);
 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
-		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea64_mpvo_zone :
+		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
 		    moea64_upvo_zone, pvo);
 	moea64_pvo_entries--;
 	moea64_pvo_remove_calls++;
@@ -2406,8 +2382,8 @@ moea64_pvo_to_pte(const struct pvo_entry
 		int		ptegidx;
 		uint64_t	vsid;
 
-		vsid = va_to_vsid(pvo->pvo_pmap, pvo->pvo_vaddr);
-		ptegidx = va_to_pteg(vsid, pvo->pvo_vaddr);
+		vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo));
+		ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo));
 		pteidx = moea64_pvo_pte_index(pvo, ptegidx);
 	}
 


