Skip site navigation (1)Skip section navigation (2)
Date:      Mon, 6 Feb 2006 14:39:37 GMT
From:      Olivier Houchard <cognet@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 91237 for review
Message-ID:  <200602061439.k16EdbrQ054683@repoman.freebsd.org>

next in thread | raw e-mail | index | archive | help
http://perforce.freebsd.org/chv.cgi?CH=91237

Change 91237 by cognet@cognet on 2006/02/06 14:38:54

	Enable promotion of kmem_object pages. For now, map all the PTEs
	the same way other pages are mapped, and write them back every time
	it's needed. Keeping PTEs mapped write-through would require much
	more work.

Affected files ...

.. //depot/projects/superpages/src/sys/arm/arm/pmap.c#6 edit
.. //depot/projects/superpages/src/sys/arm/include/pmap.h#6 edit

Differences ...

==== //depot/projects/superpages/src/sys/arm/arm/pmap.c#6 (text+ko) ====

@@ -1,4 +1,4 @@
-//* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
+/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
 /*-
  * Copyright 2004 Olivier Houchard.
  * Copyright 2003 Wasabi Systems, Inc.
@@ -493,7 +493,7 @@
 	 * we have a write-back cache, then we assume setting
 	 * only C will make those pages write-through.
 	 */
-	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+	if (1 || cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
 		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
 		pte_l2_l_cache_mode_pt = L2_B|L2_C;
 		pte_l2_s_cache_mode_pt = L2_B|L2_C;
@@ -569,9 +569,11 @@
 	pte_l2_l_cache_mode = L2_B | L2_C;
 	pte_l2_s_cache_mode = L2_B | L2_C;
 
+#if 0
 	pte_l1_s_cache_mode_pt = L1_S_C;
 	pte_l2_l_cache_mode_pt = L2_C;
 	pte_l2_s_cache_mode_pt = L2_C;
+#endif
 
 }
 #endif /* CPU_ARM10 */
@@ -617,9 +619,11 @@
 	pte_l2_s_cache_mode = L2_B|L2_C;
 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
 
+#if 0
 	pte_l1_s_cache_mode_pt = L1_S_C;
 	pte_l2_l_cache_mode_pt = L2_C;
 	pte_l2_s_cache_mode_pt = L2_C;
+#endif
 #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
 	/*
 	 * The XScale core has an enhanced mode where writes that
@@ -678,6 +682,9 @@
 	xscale_use_minidata = 1;
 #endif
 
+	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode;
+	pte_l2_l_cache_mode_pt = pte_l2_s_cache_mode;
+	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
@@ -1558,10 +1565,11 @@
 			/* There's no way we can do it. */
 			return;
 		}
-		if ((ptep[i] & pte_l2_s_cache_mask) == 
+		if (((ptep[i] & pte_l2_s_cache_mask) == 
 		    pte_l2_s_cache_mode_pt || 
 		    (ptep[i - 1] & pte_l2_s_cache_mask) ==
-		    pte_l2_s_cache_mode_pt)
+		    pte_l2_s_cache_mode_pt) && pte_l2_s_cache_mode_pt
+		    != pte_l2_s_cache_mode)
 			panic("fuck");
 	}
 #ifdef SP_DEBUG
@@ -1572,10 +1580,11 @@
 	if (*ptep & L2_S_PROT_W)
 		pa0 |= L2_L_PROT_W;
 	/* Let's do it. */
-	for (i = 0; i < 0x10; i++)
+	for (i = 0; i < 0x10; i++) {
+		pmap_tlb_flushID_SE(pmap, va + i * PAGE_SIZE);
 		ptep[i] = pa0 | L2_L_PROTO |
 		    pte_l2_l_cache_mode;
-	pmap_tlb_flushID(pmap);
+	}
 }
 
 static void
@@ -1614,8 +1623,10 @@
 			pa = (pt[i] & L2_L_FRAME) + (i & 0xf) * PAGE_SIZE;
 		else
 			pa = pt[i] & L2_S_FRAME;
-		if ((pt[i - 1] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt
-		    || (pt[i] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt)
+		if (((pt[i - 1] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt
+		    ||
+		    (pt[i] & pte_l2_s_cache_mask) == pte_l2_s_cache_mode_pt) &&
+		    pte_l2_s_cache_mode_pt != pte_l2_s_cache_mode)
 			panic("fuck2");
 		if ((pt[i - 1] & L2_TYPE_MASK) == L2_TYPE_L)
 			pa2 = (pt[i - 1] & L2_L_FRAME) +
@@ -1639,9 +1650,6 @@
 	bzero(pt, 0x100 * sizeof(*pt));
 #endif
 	pmap_free_l2_bucket(pmap, &l2->l2_bucket[L2_BUCKET(l1idx)], 0x100);
-	if (pmap != pmap_kernel() && l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva != NULL)
-		panic("missed");
-	pmap_tlb_flushID(pmap);
 }
 
 static void
@@ -3306,7 +3314,6 @@
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE);
 }
-
 /*
  * Add a list of wired pages to the kva
  * this routine is only used for temporary
@@ -3349,7 +3356,6 @@
 	}
 }
 
-
 /*
  * pmap_object_init_pt preloads the ptes for a given object
  * into the specified pmap.  This eliminates the blast of soft
@@ -3676,7 +3682,6 @@
 	mtx_unlock(&Giant);
 }
 
-
 /*
  *	Insert the given physical page (p) at
  *	the specified virtual address (v) in the
@@ -3949,7 +3954,7 @@
 		pmap_vac_me_harder(m, pmap, va);
 		if ((va < VM_MAXUSER_ADDRESS ||
 		    m->object == kernel_object || 
-		    (m->object == kmem_object && FALSE)) && m->reserv) {
+		    (m->object == kmem_object)) && m->reserv) {
 			if (m->reserv->refcnt > 0 &&
 			    m->reserv->refcnt != SP_LARGE &&
 			    (m->reserv->refcnt % SMALL) == 0)
@@ -4430,11 +4435,11 @@
 		pmap_free_l2_bucket(pm, l2b, mappings);
 	}
 
+	vm_page_unlock_queues();
 	if (flushall) {
 		pmap_idcache_wbinv_all(pm);
 		pmap_tlb_flushID(pm);
 	}
-	vm_page_unlock_queues();
 #if 0
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();

==== //depot/projects/superpages/src/sys/arm/include/pmap.h#6 (text+ko) ====

@@ -254,7 +254,8 @@
 #define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
 
 #define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
-#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
+#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)| \
+    L1_S_XSCALE_P)
 
 #define	L2_L_PROT_U		(L2_AP(AP_U))
 #define	L2_L_PROT_W		(L2_AP(AP_W))
@@ -335,7 +336,7 @@
 #define	PMAP_NEEDS_PTE_SYNC	1
 #define	PMAP_INCLUDE_PTE_SYNC
 #elif (ARM_MMU_SA1 == 0)
-#if defined(CPU_ARM9) && !defined(ARM9_CACHE_WRITE_THROUGH)
+#if 1
 #define PMAP_NEEDS_PTE_SYNC	1
 #define PMAP_INCLUDE_PTE_SYNC
 #else
@@ -527,6 +528,7 @@
 
 #endif
 extern vm_paddr_t dump_avail[];
+
 #endif	/* _KERNEL */
 
 #endif	/* !LOCORE */



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200602061439.k16EdbrQ054683>