Date:      Mon, 1 Sep 2008 05:40:22 GMT
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 148968 for review
Message-ID:  <200809010540.m815eM2V086189@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=148968

Change 148968 by nwhitehorn@nwhitehorn_trantor on 2008/09/01 05:39:38

	Fix WITNESS builds for good. This sacrifices some of our KVA space, and there are still some LORs, but my machine will now boot multiuser with WITNESS and INVARIANTS enabled. This is also my first G5 kernel built on the G5 itself...
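
For reference, the tlbie part of the change below boils down to handing the instruction a 64-bit virtual page number built from the segment VSID and the page-index bits of the effective address, instead of the raw VA. A minimal sketch of that computation follows; it simply mirrors what the diff does, reusing the masks and helpers defined in mmu_oea64.c (ADDR_PIDX, SR_VSID_MASK, va_to_sr). compute_vpn() is a hypothetical helper name used only for illustration, not the committed code:

	/* Sketch only: build the VPN passed to tlbie, as in the change below. */
	uint64_t
	compute_vpn(pmap_t pmap, vm_offset_t va)
	{
		uint64_t vpn;

		/* Page-index bits of the effective address. */
		vpn = (uint64_t)(va & ADDR_PIDX);

		/* Fold the segment's VSID in above the 28-bit segment offset. */
		if (pmap != NULL)
			vpn |= ((uint64_t)(va_to_sr(pmap->pm_sr, va) &
			    SR_VSID_MASK) << 28);

		return (vpn);
	}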

Affected files ...

.. //depot/projects/ppc-g5/sys/powerpc/aim/mmu_oea64.c#4 edit

Differences ...

==== //depot/projects/ppc-g5/sys/powerpc/aim/mmu_oea64.c#4 (text+ko) ====

@@ -193,20 +193,19 @@
 
 	SYNC();
 
-#if 0
+#if 1
 	/*
 	 * CPU documentation says that tlbie takes the VPN, not the
-	 * VA. However, my attempts to compute the VPN don't work, and
-	 * all implementations I have seen pass tlbie the VA, so disable
-	 * this for now. 
+	 * VA. I think the code below does this correctly. We will see.
 	 */
 
-	vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
+	vpn = (uint64_t)(va & ADDR_PIDX);
 	if (pmap != NULL)
 		vpn |= ((uint64_t)(va_to_sr(pmap->pm_sr,va) & SR_VSID_MASK) 
-		    << 16);
+		    << 28);
+#else
+	vpn = va;
 #endif
-	vpn = va;
 
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
@@ -305,6 +304,9 @@
 uma_zone_t	moea64_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea64_mpvo_zone;	/* zone for pvo entries for managed pages */
 
+vm_offset_t	pvo_allocator_start;
+vm_offset_t	pvo_allocator_end;
+
 #define	BPVO_POOL_SIZE	327680
 static struct	pvo_entry *moea64_bpvo_pool;
 static int	moea64_bpvo_pool_index = 0;
@@ -352,7 +354,7 @@
  * PVO calls.
  */
 static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
-		    vm_offset_t, vm_offset_t, uint64_t, int);
+		    vm_offset_t, vm_offset_t, uint64_t, int, int);
 static void	moea64_pvo_remove(struct pvo_entry *, int);
 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
 static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
@@ -971,6 +973,14 @@
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	/*
+	 * Allocate some stupid buffer regions.
+	 */
+
+	pvo_allocator_start = virtual_avail;
+	virtual_avail += SEGMENT_LENGTH/4;
+	pvo_allocator_end = virtual_avail;
+
+	/*
 	 * Allocate some things for page zeroing
 	 */
 
@@ -1123,10 +1133,9 @@
 	
 	moea64_scratchpage_pte[which]->pte_lo = 
 	    moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo;
+
 	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
-	
 	EIEIO();
-
 	TLBIE(kernel_pmap, moea64_scratchpage_va[which]);
 }
 
@@ -1253,7 +1262,7 @@
 		pvo_flags |= PVO_FAKE;
 
 	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
-	    pte_lo, pvo_flags);
+	    pte_lo, pvo_flags, 0);
 
 	if (pmap == kernel_pmap)
 		TLBIE(pmap, va);
@@ -1383,6 +1392,66 @@
 	return (m);
 }
 
+static void *
+moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 
+{
+	/*
+	 * This entire routine is a horrible hack to avoid bothering kmem
+	 * for new KVA addresses. Because this can get called from inside
+	 * kmem allocation routines, calling kmem for a new address here
+	 * can lead to multiply locking non-recursive mutexes.
+	 */
+	static vm_pindex_t color;
+        vm_offset_t va;
+
+        vm_page_t m;
+        int pflags, needed_lock;
+
+	*flags = UMA_SLAB_PRIV;
+	needed_lock = !PMAP_LOCKED(kernel_pmap);
+
+	if (needed_lock)
+		PMAP_LOCK(kernel_pmap);
+
+        if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
+                pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
+        else
+                pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
+        if (wait & M_ZERO)
+                pflags |= VM_ALLOC_ZERO;
+
+        for (;;) {
+                m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
+                if (m == NULL) {
+                        if (wait & M_NOWAIT)
+                                return (NULL);
+                        VM_WAIT;
+                } else
+                        break;
+        }
+
+	va = pvo_allocator_start;
+	pvo_allocator_start += PAGE_SIZE;
+
+	if (pvo_allocator_start >= pvo_allocator_end)
+		panic("Ran out of PVO allocator buffer space!");
+
+	/* Now call pvo_enter in recursive mode */
+	moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
+	    &moea64_pvo_kunmanaged, va,  VM_PAGE_TO_PHYS(m), LPTE_M, 
+	    PVO_WIRED | PVO_BOOTSTRAP, 1);
+
+	TLBIE(kernel_pmap, va);
+	
+	if (needed_lock)
+		PMAP_UNLOCK(kernel_pmap);
+
+	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
+                bzero((void *)va, PAGE_SIZE);
+
+	return (void *)va;
+}
+
 void
 moea64_init(mmu_t mmu)
 {
@@ -1395,6 +1464,12 @@
 	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
+
+	if (!hw_direct_map) {
+		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
+		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
+	}
+
 	moea64_initialized = TRUE;
 }
 
@@ -1518,7 +1593,7 @@
 	PMAP_LOCK(kernel_pmap);
 	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
 	    &moea64_pvo_kunmanaged, va, pa, pte_lo, 
-	    PVO_WIRED | VM_PROT_EXECUTE);
+	    PVO_WIRED | VM_PROT_EXECUTE, 0);
 
 	TLBIE(kernel_pmap, va);
 
@@ -1641,7 +1716,6 @@
 	int	i, mask;
 	u_int	entropy;
 
-	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea64_pinit: virt pmap"));
 	PMAP_LOCK_INIT(pmap);
 
 	entropy = 0;
@@ -1914,7 +1988,7 @@
 
 static int
 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
-    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
+    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags, int recurse)
 {
 	struct	pvo_entry *pvo;
 	u_int	sr;
@@ -1934,7 +2008,10 @@
 
 	moea64_pvo_enter_calls++;
 	first = 0;
-	bootstrap = 0;
+	bootstrap = (flags & PVO_BOOTSTRAP);
+
+	if (!moea64_initialized)
+		bootstrap = 1;
 
 	/*
 	 * Compute the PTE Group index.
@@ -1947,14 +2024,16 @@
 	 * Remove any existing mapping for this page.  Reuse the pvo entry if
 	 * there is a mapping.
 	 */
-	LOCK_TABLE();
+	if (!recurse)
+		LOCK_TABLE();
 
 	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
 			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
 			    (pte_lo & LPTE_PP)) {
-				UNLOCK_TABLE();
+				if (!recurse)
+					UNLOCK_TABLE();
 				return (0);
 			}
 			moea64_pvo_remove(pvo, -1);
@@ -1965,9 +2044,7 @@
 	/*
 	 * If we aren't overwriting a mapping, try to allocate.
 	 */
-	if (moea64_initialized && !mtx_recursed(&moea64_table_mutex)) {
-		pvo = uma_zalloc(zone, M_NOWAIT);
-	} else {
+	if (bootstrap) {
 		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
 			panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
 			      moea64_bpvo_pool_index, BPVO_POOL_SIZE, 
@@ -1976,10 +2053,13 @@
 		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
 		moea64_bpvo_pool_index++;
 		bootstrap = 1;
+	} else {
+		pvo = uma_zalloc(zone, M_NOWAIT);
 	}
 
 	if (pvo == NULL) {
-		UNLOCK_TABLE();
+		if (!recurse)
+			UNLOCK_TABLE();
 		return (ENOMEM);
 	}
 
@@ -2024,7 +2104,9 @@
 		panic("moea64_pvo_enter: overflow");
 		moea64_pte_overflow++;
 	}
-	UNLOCK_TABLE();
+
+	if (!recurse)
+		UNLOCK_TABLE();
 
 	return (first ? ENOENT : 0);
 }
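
The WITNESS part of the fix is the moea64_uma_page_alloc()/recurse plumbing above: when there is no direct map, the PVO zones get their slab pages from an allocator that hands back addresses from the KVA window reserved at bootstrap (pvo_allocator_start/pvo_allocator_end) rather than calling into kmem, and moea64_pvo_enter() is told when it is being re-entered so it does not take the non-recursive table lock a second time. A condensed sketch of that allocator pattern is below; pvo_page_alloc_sketch() is a hypothetical name, and the pmap locking, M_NOWAIT retry loop, and M_ZERO handling of the real moea64_uma_page_alloc() are omitted:

	/* Sketch only: UMA page allocator that avoids kmem recursion. */
	static void *
	pvo_page_alloc_sketch(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
	{
		vm_page_t m;
		vm_offset_t va;

		/* Grab a wired physical page; no KVA request goes to kmem. */
		m = vm_page_alloc(NULL, 0,
		    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
		if (m == NULL)
			return (NULL);

		/* Bump a cursor through the window reserved at bootstrap. */
		va = pvo_allocator_start;
		pvo_allocator_start += PAGE_SIZE;
		if (pvo_allocator_start >= pvo_allocator_end)
			panic("Ran out of PVO allocator buffer space!");

		/* recurse == 1: do not retake the already-held table lock. */
		moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
		    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m),
		    LPTE_M, PVO_WIRED | PVO_BOOTSTRAP, 1);
		TLBIE(kernel_pmap, va);

		*flags = UMA_SLAB_PRIV;
		return ((void *)va);
	}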


