Date:      Mon, 15 Mar 2010 00:29:15 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r205164 - projects/ppc64/sys/powerpc/aim
Message-ID:  <201003150029.o2F0TFic099287@svn.freebsd.org>

Author: nwhitehorn
Date: Mon Mar 15 00:29:15 2010
New Revision: 205164
URL: http://svn.freebsd.org/changeset/base/205164

Log:
  Set memory access as guarded on large-page direct mappings that edge into
  physical memory holes, and optimize moea64_kextract(), thereby avoiding a
  lock order reversal (LOR) at boot.
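
To picture the guarded-bit decision on its own: a large page in the direct
map should carry LPTE_G whenever the page reaches past either edge of the
physical region it backs, so a speculative fetch inside the page can never
touch a hole. A minimal standalone sketch of that check follows; the 16 MB
page size and the helper name are illustrative assumptions, not the
committed code.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative values only; the kernel probes the real large page size. */
#define	LARGE_PAGE_SIZE	(16UL * 1024 * 1024)
#define	LARGE_PAGE_MASK	(LARGE_PAGE_SIZE - 1)

/*
 * Hypothetical helper: true if the large page covering 'pa' would map
 * memory outside the physical region [start, start + size).
 */
static bool
large_page_needs_guard(uint64_t pa, uint64_t start, uint64_t size)
{
	uint64_t page_base = pa & ~LARGE_PAGE_MASK;

	if (page_base < start)
		return (true);		/* page begins before the region */
	if (page_base + LARGE_PAGE_SIZE > start + size)
		return (true);		/* page ends past the region */
	return (false);
}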

Modified:
  projects/ppc64/sys/powerpc/aim/mmu_oea64.c

Modified: projects/ppc64/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Mon Mar 15 00:27:40 2010	(r205163)
+++ projects/ppc64/sys/powerpc/aim/mmu_oea64.c	Mon Mar 15 00:29:15 2010	(r205164)
@@ -818,6 +818,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 	register_t msr;
 	vm_paddr_t pa;
 	vm_offset_t size, off;
+	uint64_t pte_lo;
 	int i;
 
 	if (moea64_large_page_size == 0) 
@@ -827,9 +828,26 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 	if (hw_direct_map) {
 		PMAP_LOCK(kernel_pmap);
 		for (i = 0; i < pregions_sz; i++) {
-		  for (pa = pregions[i].mr_start & ~moea64_large_page_mask; 
-			pa < (pregions[i].mr_start + pregions[i].mr_size);
-			pa += moea64_large_page_size) {
+		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
+		     pregions[i].mr_size; pa += moea64_large_page_size) {
+			pte_lo = LPTE_M;
+
+			/*
+			 * Set memory access as guarded if prefetch within
+			 * the page could exit the available physmem area.
+			 */
+			if (pa & moea64_large_page_mask) {
+				pa &= ~moea64_large_page_mask;
+				pte_lo |= LPTE_G;
+			}
+			if (pa + moea64_large_page_size >
+			    pregions[i].mr_start + pregions[i].mr_size)
+				pte_lo |= LPTE_G;
+
+			/*
+			 * Allocate a new SLB entry to make sure it is
+			 * for large pages.
+			 */
 			if (va_to_slb_entry(kernel_pmap, pa) == NULL)
 			  allocate_vsid(kernel_pmap, pa, 1 /* large */);
 	
@@ -1796,6 +1814,13 @@ moea64_kextract(mmu_t mmu, vm_offset_t v
 	struct		pvo_entry *pvo;
 	vm_paddr_t pa;
 
+	/*
+	 * Shortcut the direct-mapped case when applicable.  We never put
+	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
+	 */
+	if (va < VM_MIN_KERNEL_ADDRESS)
+		return (va);
+
 	PMAP_LOCK(kernel_pmap);
 	pvo = moea64_pvo_find_va(kernel_pmap, va, NULL);
 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#lx", va));
@@ -2778,7 +2803,7 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm,
 		len = MIN(lim - va, sz);
 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 		if (pvo != NULL) {
-			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
+			pa = (pvo->pvo_pte.pte.pte_lo & LPTE_RPGN) |
 			    (va & ADDR_POFF);
 			moea64_syncicache(pm, va, pa, len);
 		}
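
The moea64_kextract() shortcut above relies on one invariant: every virtual
address below VM_MIN_KERNEL_ADDRESS is part of the 1:1 direct map, so the
physical address equals the virtual address and no locked PVO lookup is
needed, which is what removes the lock order reversal during boot. A rough
sketch of that fast path follows, with an illustrative function name and a
placeholder for the real VM_MIN_KERNEL_ADDRESS.

#include <stdint.h>

/* Placeholder; the real VM_MIN_KERNEL_ADDRESS is defined per platform. */
#define	KERNEL_VA_MIN	0xc000000000000000UL

/*
 * Sketch of the fast path only: below the kernel VA range the mapping is
 * 1:1, so pa == va and the pmap lock never has to be taken.
 */
static uint64_t
kextract_fast_path(uint64_t va)
{
	if (va < KERNEL_VA_MIN)
		return (va);
	/*
	 * Outside the direct map a locked PVO lookup would be required,
	 * as in moea64_kextract(); elided in this sketch.
	 */
	return (0);
}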


