Date:      Wed, 3 May 2006 05:00:02 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 96586 for review
Message-ID:  <200605030500.k435020C082584@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=96586

Change 96586 by kmacy@kmacy_storage:sun4v_rwbuf on 2006/05/03 04:59:12

	simplify the tte_hash interface:
	- hide bucket locking from outside callers
	- add tte_hash_update, which atomically installs a new tte and
	  returns the previous one
	- change tte_hash_delete to return the tte being deleted, avoiding
	  a redundant lookup
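
	For illustration, callers that previously needed a locked lookup, a
	dereference, and a separate delete, as in the old pmap_remove_all():

		tte = tte_hash_lookup(pmap->pm_hash, va, TRUE);
		tte_data = *tte;
		tte_hash_delete(pmap->pm_hash, va, TRUE);

	now make one internally locked call:

		tte_data = tte_hash_delete(pmap->pm_hash, va);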

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#11 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#40 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte.c#7 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#16 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#11 (text+ko) ====

@@ -14,22 +14,23 @@
 
 tte_hash_t tte_hash_create(uint64_t context, uint64_t *scratchval);
 
-void tte_hash_bucket_unlock(tte_hash_t hash, vm_offset_t va);
-
 void tte_hash_destroy(tte_hash_t th);
 
-int tte_hash_delete(tte_hash_t hash, vm_offset_t va, int locked);
+tte_t tte_hash_delete(tte_hash_t hash, vm_offset_t va);
 
 void tte_hash_delete_all(tte_hash_t hash);
 
 void tte_hash_insert(tte_hash_t hash, vm_offset_t va, tte_t data);
 
-tte_t *tte_hash_lookup(tte_hash_t hash, vm_offset_t va, int leave_locked);
+tte_t tte_hash_lookup(tte_hash_t hash, vm_offset_t va);
+
+tte_t tte_hash_lookup_nolock(tte_hash_t hash, vm_offset_t va);
 
 uint64_t tte_hash_set_scratchpad_kernel(tte_hash_t th);
 
 uint64_t tte_hash_set_scratchpad_user(tte_hash_t th, uint64_t context);
 
+tte_t tte_hash_update(tte_hash_t hash, vm_offset_t va, tte_t tte_data);
 
 
 #endif /* _MACHINE_TTE_HASH_H_ */
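
tte_hash_update() is the interesting new entry point: a locked
read-modify-write that installs tte_data for va and hands back the
previous tte, so callers can inspect the old REF/W bits without a second
lookup. A minimal sketch of the calling pattern (taken from the
pmap_protect() hunk below):

	otte_data = tte_hash_lookup(pmap->pm_hash, tva);
	tte_data = otte_data & ~(VTD_SW_W | VTD_W | VTD_REF);
	otte_data = tte_hash_update(pmap->pm_hash, tva, tte_data);
	if (tte_data != otte_data)
		anychanged = 1;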

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#40 (text+ko) ====

@@ -126,7 +126,7 @@
 static uma_zone_t pvzone;
 static struct vm_object pvzone_obj;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
-int pmap_debug = 0;
+int pmap_debug = 1;
 
 static struct mtx pmap_ctx_lock;
 static uint16_t ctx_stack[PMAP_CONTEXT_MAX];
@@ -196,7 +196,7 @@
 
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
-static void pmap_remove_tte(pmap_t pmap, uint64_t *tte, vm_offset_t va);
+static void pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va);
 
 /*
  * Quick sort callout for comparing memory regions.
@@ -277,7 +277,7 @@
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
 	struct vpgqueues *vpq;
-	uint64_t *tte, tte_data;
+	uint64_t tte_data;
 	pmap_t pmap;
 	pv_entry_t allocated_pv, next_pv, pv;
 	vm_offset_t va;
@@ -319,9 +319,7 @@
 				continue;
 			pmap->pm_stats.resident_count--;
 
-			tte = tte_hash_lookup(pmap->pm_hash, va, TRUE);
-			tte_data = *tte;
-			tte_hash_delete(pmap->pm_hash, va, TRUE);
+			tte_data = tte_hash_delete(pmap->pm_hash, va);
 
 			KASSERT((tte_data & VTD_WIRED) == 0,
 			    ("get_pv_entry: wired pte %#jx", (uintmax_t)tte_data));
@@ -815,12 +813,8 @@
 {
 	vm_offset_t addr, end_addr;
 
-
 	end_addr = src_addr + len;
 
-	if (dst_addr != src_addr)
-		return;
-
 	/*
 	 * Don't let optional prefaulting of pages make us go
 	 * way below the low water mark of free pages or way
@@ -840,13 +834,13 @@
 	}
 	sched_pin();
 	for (addr = src_addr; addr < end_addr; addr += PAGE_SIZE) {
-		tte_t *src_tte, *dst_tte, tte_data;
+		tte_t tte_data;
 		vm_page_t m;
 
-		src_tte = tte_hash_lookup(src_pmap->pm_hash, addr, FALSE);
-		tte_data = src_tte ? *src_tte : 0;
+		tte_data = tte_hash_lookup_nolock(src_pmap->pm_hash, addr);
+
 		if ((tte_data & VTD_MANAGED) != 0) {
-			if ((dst_tte = tte_hash_lookup(dst_pmap->pm_hash, addr, FALSE)) == NULL) {
+			if (tte_hash_lookup_nolock(dst_pmap->pm_hash, addr) == 0) {
 				m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
 				tte_hash_insert(dst_pmap->pm_hash, addr, tte_data & ~(VTD_W|VTD_REF));
 				dst_pmap->pm_stats.resident_count++;
@@ -885,7 +879,7 @@
 	   boolean_t wired)
 {
 	vm_paddr_t pa, opa;
-	uint64_t tte_data, otte_data, *otte;
+	uint64_t tte_data, otte_data;
 	vm_page_t om;
 	int invlva;
 
@@ -900,8 +894,7 @@
 	sched_pin();
 
 	tte_data = pa = VM_PAGE_TO_PHYS(m);
-	otte = tte_hash_lookup(pmap->pm_hash, va, TRUE);
-	otte_data = otte ? *otte : 0;
+	otte_data = tte_hash_lookup(pmap->pm_hash, va);
 	opa = TTE_GET_PA(otte_data);
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
@@ -972,9 +965,10 @@
 	else
 		tte_data |= TTE_MINFLAGS;
 
-	if ((otte_data & ~(VTD_W|VTD_REF|VTD_LOCK)) != tte_data) {
+	otte_data = tte_hash_update(pmap->pm_hash, va, tte_data);
+	invlva = FALSE;
+	if ((otte_data & ~(VTD_W|VTD_REF)) != tte_data) {
 		if (otte_data & VTD_V) {
-			invlva = FALSE;
 			if (otte_data & VTD_REF) {
 				if (otte_data & VTD_MANAGED)
 					vm_page_flag_set(om, PG_REFERENCED);
@@ -985,28 +979,16 @@
 				if ((otte_data & VTD_MANAGED) &&
 				    pmap_track_modified(pmap, va))
 					vm_page_dirty(om);
-				if ((prot & VM_PROT_WRITE) == 0)
+#if 0
+				if ((prot & VM_PROT_WRITE) == 0) /* XXX double check */
+#endif
 					invlva = TRUE;
 			}
-			DPRINTF("update *otte 0x%lx -> 0x%lx\n", otte_data, tte_data);
-			*otte = tte_data;
+		}
+	} 
 
-			if (!(otte_data & VTD_LOCK))
-				tte_hash_bucket_unlock(pmap->pm_hash, va);
-#ifdef notyet
-			if (invlva)
-#endif
-				pmap_invalidate_page(pmap, va);
-		} else {
-			tte_hash_insert(pmap->pm_hash, va, tte_data);
-			membar(Sync);
-		}
-	} else {
-		if (!(*otte & VTD_LOCK))
-			tte_hash_bucket_unlock(pmap->pm_hash, va);
-		else
-			*otte = (*otte & ~VTD_LOCK); 
-	}
+	if (invlva)
+		pmap_invalidate_page(pmap, va);
 
 	sched_unpin();
 	PMAP_UNLOCK(pmap);
@@ -1067,10 +1049,10 @@
 pmap_extract(pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t pa;
-	tte_t *tte;
+	tte_t tte_data;
 
-	tte = tte_hash_lookup(pmap->pm_hash, va, FALSE);
-	pa = TTE_GET_PA(*tte) | (va & TTE_GET_PAGE_MASK(*tte));
+	tte_data = tte_hash_lookup(pmap->pm_hash, va);
+	pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
 
 	return (pa);
 }
@@ -1083,17 +1065,17 @@
 vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
-	tte_t *tte_data;
+	tte_t tte_data;
 	vm_page_t m;
 
 	m = NULL;
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	sched_pin();
-	tte_data = tte_hash_lookup(pmap->pm_hash, va, FALSE);
+	tte_data = tte_hash_lookup_nolock(pmap->pm_hash, va);
 	if (tte_data != 0 && 
-	    ((*tte_data & VTD_SW_W) || (prot & VM_PROT_WRITE) == 0)) {
-		m = PHYS_TO_VM_PAGE(TTE_GET_PA(*tte_data));
+	    ((tte_data & VTD_SW_W) || (prot & VM_PROT_WRITE) == 0)) {
+		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
 		vm_page_hold(m);
 	}
 	sched_unpin();
@@ -1252,7 +1234,6 @@
 	invlpg(va, pmap->pm_context);
 	
 #ifdef SMP
-
 	pmap_ipi(pmap, (void *)tl_invlpg, (uint64_t)va, (uint64_t)pmap->pm_context);
 #endif
 	spinlock_exit();
@@ -1349,7 +1330,7 @@
 boolean_t 
 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
 {
-	return (tte_hash_lookup(pmap->pm_hash, va, FALSE) == NULL);
+	return (tte_hash_lookup(pmap->pm_hash, va) == 0);
 }
 
 /*
@@ -1369,7 +1350,7 @@
 vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
-	uint64_t *tte, tte_data;
+	tte_t tte_data;
 	vm_paddr_t pa;
 
         pa = 0;
@@ -1381,8 +1362,8 @@
 	if ((pa == 0) && (tte_data = tsb_lookup_tte(va, 0)) != 0)
 		pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
 
-	if ((pa == 0) && (tte = tte_hash_lookup(kernel_pmap->pm_hash, va, FALSE)) != NULL)
-		pa = TTE_GET_PA(*tte) | (va & TTE_GET_PAGE_MASK(*tte));
+	if ((pa == 0) && (tte_data = tte_hash_lookup(kernel_pmap->pm_hash, va)) != 0)
+		pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
 
 	return pa;
 }
@@ -1390,7 +1371,7 @@
 static void
 pmap_kremove(vm_offset_t va)
 { 
-	tte_hash_delete(kernel_pmap->pm_hash, va, FALSE);
+	tte_hash_delete(kernel_pmap->pm_hash, va);
 }
 
 static void
@@ -1540,7 +1521,6 @@
 {
 
 	int anychanged;
-	uint64_t *tte;
 	vm_offset_t tva;
 
 	DPRINTF("pmap_protect(0x%lx, 0x%lx, %d)\n", sva, eva, prot);
@@ -1562,40 +1542,32 @@
 	for (tva = sva; tva < eva; tva += PAGE_SIZE) {
 		uint64_t otte_data, tte_data;
 		vm_page_t m;
-	retry:
-		tte = tte_hash_lookup(pmap->pm_hash, tva, TRUE);
-		if (tte == NULL)
+
+		if ((otte_data = tte_hash_lookup(pmap->pm_hash, tva)) == 0)
 			continue;
 
-		otte_data = tte_data = *tte;
+		tte_data = otte_data;
+		
+		tte_data &= ~(VTD_SW_W | VTD_W | VTD_REF);
+		otte_data = tte_hash_update(pmap->pm_hash, tva, tte_data);
+		
+		if (tte_data != otte_data) 
+			anychanged = 1;
 
-		if (tte_data & VTD_MANAGED) {
+		if (otte_data & VTD_MANAGED) {
 			m = NULL;
-			if (tte_data & VTD_REF) {
-				m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
+			
+			if (otte_data & VTD_REF) {
+				m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
 				vm_page_flag_set(m, PG_REFERENCED);
-				tte_data &= ~VTD_REF;
 			}
-			if ((tte_data & VTD_W) && pmap_track_modified(pmap, tva)) {
-				m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
+			if ((otte_data & VTD_W) && pmap_track_modified(pmap, tva)) {
+				m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
 				vm_page_dirty(m);
 			}
-		}
-		
-		tte_data &= ~(VTD_SW_W | VTD_W);
-		
-		if (tte_data != otte_data) {
-			if (!atomic_cmpset_long(tte, otte_data, tte_data)) {
-                                /* XXX this should never happen with hash bucket locks  - FIXME */
-				panic("bucket locked but tte data changed");
-				tte_hash_bucket_unlock(pmap->pm_hash, tva); 
-				goto retry;
-			}
-			anychanged = 1;
+
 		}
-		tte_hash_bucket_unlock(pmap->pm_hash, tva); 
 	}
-	
 
 	sched_unpin();
 	if (anychanged)
@@ -1669,33 +1641,30 @@
 {
 	int invlva;
 	vm_offset_t tva;
-	uint64_t *tte;
+	uint64_t tte_data;
 	/*
 	 * Perform an unsynchronized read.  This is, however, safe.
 	 */
 	if (pmap->pm_stats.resident_count == 0)
 		return;
 	
+	DPRINTF("pmap_remove(start=0x%lx, end=0x%lx)\n", 
+		start, end);
 	invlva = 0;
 	vm_page_lock_queues();
 	sched_pin();
 	PMAP_LOCK(pmap);
 	for (tva = start; tva < end; tva += PAGE_SIZE) {
-		if ((tte = tte_hash_lookup(pmap->pm_hash, tva, TRUE)) == NULL)
+		if ((tte_data = tte_hash_delete(pmap->pm_hash, tva)) == 0)
 			continue;
-		pmap_remove_tte(pmap, tte, tva);
-		tte_hash_delete(pmap->pm_hash, tva, TRUE);
-
+		pmap_remove_tte(pmap, tte_data, tva);
 		invlva = 1;
 	}
 	sched_unpin();
 	vm_page_unlock_queues();
-	if (invlva) {
-		if (pmap == kernel_pmap)
-			pmap_invalidate_range(pmap, start, end);
-		else
-			pmap_invalidate_all(pmap);
-	}
+	if (invlva) 
+		pmap_invalidate_range(pmap, start, end);
+
 	PMAP_UNLOCK(pmap);
 
 }
@@ -1717,7 +1686,7 @@
 pmap_remove_all(vm_page_t m)
 {
 	pv_entry_t pv;
-	uint64_t *tte, tte_data;
+	uint64_t tte_data;
 	DPRINTF("pmap_remove_all 0x%lx\n", VM_PAGE_TO_PHYS(m));
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -1725,9 +1694,9 @@
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		PMAP_LOCK(pv->pv_pmap);
 		pv->pv_pmap->pm_stats.resident_count--;
-		tte = tte_hash_lookup(pv->pv_pmap->pm_hash, pv->pv_va, TRUE);
-		tte_data = *tte;
-		tte_hash_delete(pv->pv_pmap->pm_hash, pv->pv_va, TRUE);
+
+		tte_data = tte_hash_delete(pv->pv_pmap->pm_hash, pv->pv_va);
+
 		if (tte_data & VTD_WIRED)
 			pv->pv_pmap->pm_stats.wired_count--;
 		if (tte_data & VTD_REF)
@@ -1790,17 +1759,17 @@
 	
 	vm_page_t m;
 	pv_entry_t pv, npv;
-	tte_t *tte, tte_data;
-
+	tte_t tte_data;
+	
+	DPRINTF("pmap_remove_pages(ctx=0x%lx)\n", pmap->pm_context);
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	sched_pin();
 	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
-		tte = tte_hash_lookup(pmap->pm_hash, pv->pv_va, FALSE);
-		tte_data = tte ? *tte : 0;
+		tte_data = tte_hash_delete(pmap->pm_hash, pv->pv_va);
 		
 		if (tte_data == 0) {
-		       printf("TTE at %p  IS ZERO @ VA %016lx\n", tte, pv->pv_va);
+		       printf("TTE IS ZERO @ VA %016lx\n", pv->pv_va);
 		       panic("bad tte");
 		}
 
@@ -1815,7 +1784,9 @@
 		}
 		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
 		pmap->pm_stats.resident_count--;
-		
+		if ((uint64_t)m < KERNBASE)
+			panic("m=%p va=0x%lx tte_data=0x%lx",
+			      m, pv->pv_va, tte_data);
 		
 		if (tte_data & VTD_W) {
 			vm_page_dirty(m);
@@ -1823,7 +1794,7 @@
 		
 		npv = TAILQ_NEXT(pv, pv_plist);
 		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-
+		
 		m->md.pv_list_count--;
 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 		if (TAILQ_EMPTY(&m->md.pv_list))
@@ -1831,7 +1802,6 @@
 
 		free_pv_entry(pv);
 	}
-	tte_hash_delete_all(pmap->pm_hash);
 
 	sched_unpin();
 	pmap_invalidate_all(pmap);
@@ -1852,31 +1822,31 @@
 	}
 }
 static void
-pmap_remove_tte(pmap_t pmap, uint64_t *tte, vm_offset_t va)
+pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va)
 {
 	
 	vm_page_t m;
 
 	if (pmap != kernel_pmap)
-		DPRINTF("pmap_remove_tte(va=0x%lx, pa=0x%lx)\n", va, TTE_GET_PA(*tte));
+		DPRINTF("pmap_remove_tte(va=0x%lx, pa=0x%lx)\n", va, TTE_GET_PA(tte_data));
+
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if (*tte & VTD_WIRED)
-		pmap->pm_stats.wired_count -= 1;
+	if (tte_data & VTD_WIRED)
+		pmap->pm_stats.wired_count--;
 
-	pmap->pm_stats.resident_count -= 1;
+	pmap->pm_stats.resident_count--;
 	
-	if (*tte & VTD_MANAGED) {
-		m = PHYS_TO_VM_PAGE(TTE_GET_PA(*tte));
-		if (*tte & VTD_W) {
+	if (tte_data & VTD_MANAGED) {
+		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
+		if (tte_data & VTD_W) {
 			if (pmap_track_modified(pmap, va))
 				vm_page_dirty(m);	
 		}
-		if (*tte & VTD_REF) 
+		if (tte_data & VTD_REF) 
 			vm_page_flag_set(m, PG_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
-	
 }
 
 /*
@@ -1921,21 +1891,6 @@
 void
 pmap_zero_page_idle(vm_page_t m)
 {
-	int i;
-	for (i = 0; i < (nucleus_memory >> PAGE_SHIFT_4M); i++) {
-		if (VM_PAGE_TO_PHYS(m) >= nucleus_mappings[i] &&  
-		    VM_PAGE_TO_PHYS(m) < nucleus_mappings[i] + PAGE_SIZE_4M)
-			panic("zeroing nucleus");
-
-		if (pmap_kextract(TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m))) >= 
-		    nucleus_mappings[i] &&  
-		    pmap_kextract(TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m))) < 
-		    nucleus_mappings[i] + PAGE_SIZE_4M)
-			panic("zeroing nucleus in direct area");
-	}
-	
-	
-
 	bzero((char *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
 }
 

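The payoff shows in pmap_remove(): each page's old tte now comes straight
back from the single locked delete, so the loop body reduces to (as
committed above):

	for (tva = start; tva < end; tva += PAGE_SIZE) {
		if ((tte_data = tte_hash_delete(pmap->pm_hash, tva)) == 0)
			continue;
		pmap_remove_tte(pmap, tte_data, tva);
		invlva = 1;
	}
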
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte.c#7 (text+ko) ====

@@ -24,8 +24,6 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
-
-
  * $ Exp $
  */
 
@@ -59,7 +57,6 @@
 tte_clear_phys_bit(vm_page_t m, uint64_t flags)
 {
 	pv_entry_t pv;
-	uint64_t tte_data, *tte;
 
 	if ((m->flags & PG_FICTITIOUS) ||
 	    (flags == VTD_SW_W && (m->flags & PG_WRITEABLE) == 0))
@@ -71,6 +68,7 @@
 	 * setting RO do we need to clear the VAC?
 	 */
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		tte_t otte_data, tte_data;
 		/*
 		 * don't write protect pager mappings
 		 */
@@ -78,24 +76,23 @@
 			if (!pmap_track_modified(pv->pv_pmap, pv->pv_va))
 				continue;
 		}
-		PMAP_LOCK(pv->pv_pmap);
-		tte = tte_hash_lookup(pv->pv_pmap->pm_hash, pv->pv_va, FALSE);
-	retry:
-		tte_data = *tte;
-		if (tte_data & flags) {
+		otte_data = tte_hash_lookup(pv->pv_pmap->pm_hash, pv->pv_va);
+
+		if (otte_data & flags) {
 			if (flags == VTD_SW_W) {
-				if (!atomic_cmpset_long((u_long *)tte, tte_data,
-				    tte_data & ~(VTD_SW_W | VTD_W)))
-					goto retry;
-				if (tte_data & VTD_W) {
-					vm_page_dirty(m);
-				}
+				tte_data = otte_data & ~(VTD_SW_W | VTD_W);
 			} else {
-				atomic_clear_long((u_long *)tte, flags);
+				tte_data = otte_data & ~flags;
 			}
+			otte_data = tte_hash_update(pv->pv_pmap->pm_hash, pv->pv_va, tte_data);
+
+			if (otte_data & VTD_W) 
+				vm_page_dirty(m);
+
 			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 		}
-		PMAP_UNLOCK(pv->pv_pmap);
+		    
+		
 	}
 	if (flags == VTD_SW_W)
 		vm_page_flag_clear(m, PG_WRITEABLE);
@@ -118,12 +115,12 @@
 void 
 tte_clear_virt_bit(struct pmap *pmap, vm_offset_t va, uint64_t flags)
 {
-	tte_t *tte;
+	tte_t tte_data;
 
-	tte = tte_hash_lookup(pmap->pm_hash, va, FALSE);
+	tte_data = tte_hash_lookup(pmap->pm_hash, va);
 
-	if (tte) {
-		atomic_clear_long((u_long *)tte, flags);
+	if (tte_data) {
+		tte_hash_update(pmap->pm_hash, va,  tte_data & ~flags);
 		pmap_invalidate_page(pmap, va);
 	}
 }
@@ -137,11 +134,9 @@
 boolean_t 
 tte_get_virt_bit(struct pmap *pmap, vm_offset_t va, uint64_t flags)
 {
-	tte_t ttedata, *tte;
+	tte_t tte_data;
 	
-	tte = tte_hash_lookup(pmap->pm_hash, va, FALSE);
+	tte_data = tte_hash_lookup(pmap->pm_hash, va);
 	
-	ttedata = tte ? *tte : 0; 
-
-	return ((ttedata & flags) == flags);
+	return ((tte_data & flags) == flags);
 }

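tte.c gets the same treatment: the atomic_cmpset/goto-retry loop in
tte_clear_phys_bit() becomes a plain locked update. A sketch of the
write-protect (flags == VTD_SW_W) case, per the hunk above:

	otte_data = tte_hash_lookup(pv->pv_pmap->pm_hash, pv->pv_va);
	if (otte_data & flags) {
		otte_data = tte_hash_update(pv->pv_pmap->pm_hash, pv->pv_va,
		    otte_data & ~(VTD_SW_W | VTD_W));
		if (otte_data & VTD_W)
			vm_page_dirty(m);
		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
	}
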
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#16 (text+ko) ====

@@ -59,6 +59,8 @@
 #define HASH_MASK(th) ((th->th_size << (PAGE_SHIFT - THE_SHIFT)) - 1)
 #define HASH_VALID     0x1
 
+#define DEBUG
+
 struct tte_hash_entry;
 
 typedef union {
@@ -121,7 +123,8 @@
 hash_bucket_lock(tte_hash_field_t fields) 
 {
 	uint64_t data;
-	
+
+	spinlock_enter();
 	data = fields[0].tte.data & ~VTD_LOCK;
 	while (atomic_cmpset_long(&fields[0].tte.data, data, data | VTD_LOCK))
 		data = fields[0].tte.data & ~VTD_LOCK;
@@ -130,25 +133,6 @@
 		
 }
 
-void
-tte_hash_bucket_unlock(tte_hash_t th, vm_offset_t va)
-{
-	uint64_t hash_shift, hash_index;
-	tte_hash_field_t fields;
-
-	/* XXX - only handle 8K pages for now */
-	hash_shift = PAGE_SHIFT;
-	hash_index = (va >> hash_shift) & HASH_MASK(th);
-	fields = (th->th_hashtable[hash_index].the_fields);
-
-#ifdef DEBUG
-	if ((fields[0].tte.data & VTD_LOCK) == 0)
-		panic("trying to unlock bucket that isn't locked");
-#endif
-	fields[0].tte.data &= ~VTD_LOCK;
-	membar(StoreLoad);
-}
-
 static __inline void
 hash_bucket_unlock_inline(tte_hash_field_t fields) 
 {
@@ -158,6 +142,7 @@
 #endif
 	fields[0].tte.data &= ~VTD_LOCK;
 	membar(StoreLoad);
+	spinlock_exit();
 }
 
 void 
@@ -233,12 +218,6 @@
 }
 
 void
-tte_hash_clear(tte_hash_t th)
-{
-	bzero(th->th_hashtable, th->th_size*PAGE_SIZE);
-}
-
-void
 tte_hash_destroy(tte_hash_t th)
 {
 	vm_page_t m, hash_pages[MAX_HASH_SIZE];
@@ -264,52 +243,74 @@
 }
 
 
-int
-tte_hash_delete(tte_hash_t th, vm_offset_t va, int locked)
+static __inline tte_t 
+tte_hash_lookup_inline(tte_hash_t th, vm_offset_t va, int *index)
+{
+	uint64_t hash_shift, hash_index;
+	tte_hash_field_t fields;
+	int i;
+	tte_t entry;
+	/* XXX - only handle 8K pages for now */
+
+	hash_shift = PAGE_SHIFT;
+	hash_index = (va >> hash_shift) & HASH_MASK(th);
+	fields = (th->th_hashtable[hash_index].the_fields);
+	entry = 0;
+
+	for (i = 0; i < 4 && fields[i].tte.tag != 0; i++) {
+		if (((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M))) {
+			entry = (fields[i].tte.data & ~VTD_LOCK);
+			break;
+		}
+	}
+	if (index)
+		*index = i;
+        /* 
+	 * XXX handle the case of collisions > 3
+	 */
+	return (entry);
+}
+
+
+tte_t
+tte_hash_delete(tte_hash_t th, vm_offset_t va)
 {
 	uint64_t hash_shift, hash_index;
 	tte_hash_field_t fields;
 	int i, vaindex, lastindex;
+	tte_t tte_data;
+	
 	/* XXX - only handle 8K pages for now */
 
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
 	fields = (th->th_hashtable[hash_index].the_fields);
+	
+	tte_data = 0;
 
-#if 0
-	KASSERT(tte_hash_lookup(th, va, FALSE) != 0, ("attempting to delete non-existent entry"));
-#endif	
-	if (locked == FALSE)
-		hash_bucket_lock(fields);
+	hash_bucket_lock(fields);
+	
+	tte_data = tte_hash_lookup_inline(th, va, &vaindex);
 
-	for (i = 0; i <= 3; i++) 
-		if ((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M)) 
-			break;
-	vaindex = i;
+	if (tte_data == 0)
+		goto done;
 
-	for (i = 0; i <= 3; i++) 
-		if (fields[i].tte.tag == 0)
-			break;
+	for (i = 0; (i < 4) && (fields[i + 1].tte.tag != 0); i++) 
+		;
 	lastindex = i;
 
-	if (vaindex != lastindex && (vaindex < 4)) {
+	if (vaindex != lastindex) {
 		fields[vaindex].tte.tag = fields[lastindex].tte.tag;
-		fields[vaindex].tte.data = fields[lastindex].tte.data;
+		fields[vaindex].tte.data = fields[lastindex].tte.data | 
+			(fields[vaindex].tte.data & VTD_LOCK);
 	} 
 	fields[lastindex].tte.tag = 0;
-	fields[lastindex].tte.data = 0;
-	
-	if (lastindex != 0)
-		hash_bucket_unlock_inline(fields);
+	fields[lastindex].tte.data = 0 | (fields[lastindex].tte.data & VTD_LOCK);
 
-	return (vaindex < 4);
-}
+done:	
+	hash_bucket_unlock_inline(fields);
 
-void
-tte_hash_delete_all(tte_hash_t th)
-{
-	
-	bzero(th->th_hashtable, th->th_size*PAGE_SIZE);
+	return (tte_data);
 }
 
 void
@@ -318,77 +319,69 @@
 
 	uint64_t hash_shift, hash_index, tte_tag;
 	tte_hash_field_t fields;
-	int i;
+	int cookie;
+	tte_t otte_data;
+	
 
-	
-#if 0
-	tte_t *tte;
-	tte = tte_hash_lookup(th, va, FALSE);
-	if (tte)
-		panic("mapping for va=0x%lx already exists tte_data=0x%lx\n", va, *tte);
-#endif
 	/* XXX - only handle 8K pages for now */
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
 	fields = (th->th_hashtable[hash_index].the_fields);
 
-	hash_bucket_lock(fields);
 	tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
 
-	for (i = 0; i <= 3; i++) {
-		if ((fields[i].tte.tag == 0) || (fields[i].tte.tag == tte_tag)) {
-			fields[i].tte.data = tte_data | (i ? 0 : VTD_LOCK);
-			fields[i].tte.tag = tte_tag;
-			goto done;
-		} 
+	hash_bucket_lock(fields);
+	otte_data = tte_hash_lookup_inline(th, va, &cookie);
+#ifdef DEBUG
+	if (otte_data)
+		panic("mapping for va=0x%lx already exists tte_data=0x%lx\n", va, otte_data);
+#endif
+	fields[cookie].tte.data = tte_data | (fields[cookie].tte.data & VTD_LOCK);
+	fields[cookie].tte.tag = tte_tag;
 		
-	}
-	panic("collision handling unimplemented - please re-consider");
+	if (cookie == 4)
+		panic("collision handling unimplemented - please re-consider");
 	
-done:
 	hash_bucket_unlock_inline(fields);
 	th->th_entries++;
 }
 
+
+
+tte_t 
+tte_hash_lookup_nolock(tte_hash_t th, vm_offset_t va)
+{
+	return tte_hash_lookup_inline(th, va, NULL);
+}
+
+
 /* 
  * If leave_locked is true the tte's data field will be returned to
  * the caller with the hash bucket left locked
  */
 
 
-tte_t *
-tte_hash_lookup(tte_hash_t th, vm_offset_t va, int leave_locked)
+tte_t 
+tte_hash_lookup(tte_hash_t th, vm_offset_t va)
 {
 	uint64_t hash_shift, hash_index;
 	tte_hash_field_t fields;
-	int i;
-	tte_t *entry;
+	tte_t tte_data;
 	/* XXX - only handle 8K pages for now */
 
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
 	fields = (th->th_hashtable[hash_index].the_fields);
-	entry = NULL;
 
 	hash_bucket_lock(fields);
-	for (i = 0; i <= 3; i++) {
-		if (((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M)) &&
-		    (fields[i].tte.data != 0)) {
-			entry = &(fields[i].tte.data);
-			break;
-		}
-	}
-	if (entry == NULL || leave_locked == FALSE)
-		hash_bucket_unlock_inline(fields);
+	tte_data = tte_hash_lookup_inline(th, va, NULL);
+	hash_bucket_unlock_inline(fields);
 	
-        /* 
-	 * XXX handle the case of collisions > 3
-	 *
-	 */
-	return (entry);
+	return (tte_data);
 }
 
 
+
 uint64_t
 tte_hash_set_scratchpad_kernel(tte_hash_t th)
 {
@@ -418,3 +411,30 @@
 	return hash_scratch;
 }
 
+tte_t
+tte_hash_update(tte_hash_t th, vm_offset_t va, tte_t tte_data)
+{
+	uint64_t hash_shift, hash_index;
+	tte_hash_field_t fields;
+	int cookie;
+	tte_t otte_data;
+	
+	/* XXX - only handle 8K pages for now */
+	hash_shift = PAGE_SHIFT;
+	hash_index = (va >> hash_shift) & HASH_MASK(th);
+	fields = (th->th_hashtable[hash_index].the_fields);
+
+	hash_bucket_lock(fields);
+	otte_data = tte_hash_lookup_inline(th, va, &cookie);
+#ifdef TTE_DEBUG
+	printf("tte_hash_update(va=0x%lx, tte_data=0x%lx, index=%d)\n", va, tte_data, cookie);
+#endif
+	fields[cookie].tte.tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
+	fields[cookie].tte.data = tte_data | (fields[cookie].tte.data & VTD_LOCK);
+	hash_bucket_unlock_inline(fields);
+
+	if (otte_data == 0)
+		th->th_entries++;
+
+	return otte_data;
+}
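
With the bucket lock now private to tte_hash.c, every public operation
follows the same protocol; note that hash_bucket_lock() pairs the VTD_LOCK
bit with a spinlock so the holder cannot be preempted. A sketch of the
common shape shared by delete, insert, lookup, and update:

	hash_bucket_lock(fields);		/* spinlock_enter + set VTD_LOCK */
	otte_data = tte_hash_lookup_inline(th, va, &cookie);
	/* read or rewrite fields[cookie], preserving the VTD_LOCK bit */
	hash_bucket_unlock_inline(fields);	/* clear VTD_LOCK + spinlock_exit */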


