Date:      Sat, 07 Jun 2003 11:35:09 -0000
From:      Juli Mallett <jmallett@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 32741 for review
Message-ID:  <200306071135.h57BZ6am035822@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=32741

Change 32741 by jmallett@jmallett_dalek on 2003/06/07 04:34:36

	Move out a fair bit of code to tlb.c, and cripple a few things
	along the way.  Easier this way for now...

Affected files ...

.. //depot/projects/mips/sys/mips/mips/pmap.c#16 edit

Differences ...

==== //depot/projects/mips/sys/mips/mips/pmap.c#16 (text+ko) ====

@@ -113,15 +113,8 @@
 
 #include <machine/locore.h>
 #include <machine/md_var.h>
+#include <machine/tlb.h>
 
-/*
- * The joy of indexing.
- *
- * User addresses don't have the bits set that XKSEG has, best way to
- * index the page table is to remove those bits, and get a page number.
- */
-#define	pmap_index(va)	(((va) & ~VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT)
-
 #ifndef PMAP_SHPGPERPROC
 #define PMAP_SHPGPERPROC 200
 #endif
@@ -153,7 +146,6 @@
 #define pmap_pte_pa(pte)	MIPS_PTE_TO_PA(*(pte))
 #define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
 
-#define pmap_pte_set_w(pte, v) ((v)?(*pte |= PG_W):(*pte &= ~PG_W))
 #define pmap_pte_set_prot(pte, v) ((*pte &= ~PG_PROT), (*pte |= (v)))
 
 /*
@@ -189,9 +181,6 @@
  */
 #define pmap_k0seg_to_pte(va)	MIPS_PA_TO_PFN(pmap_k0seg_to_pfn(va))
 
-pt_entry_t	*kptmap;
-vm_size_t	kptsize;
-
 /*
  * Statically allocated kernel pmap
  */
@@ -228,7 +217,6 @@
 static pv_entry_t get_pv_entry(void);
 
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva);
-static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
 		vm_page_t mpte, vm_page_t m);
@@ -245,7 +233,7 @@
 {
 	if (pmap == NULL || pmap->pm_lev1 == NULL)
 		return NULL;
-	return &pmap->pm_lev1[pmap_index(va)];
+	return tlb_pte_find(pmap->pm_lev1, va);
 }
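
tlb_pte_find() itself is not part of this diff.  A minimal sketch of
what it presumably does, assuming it keeps the indexing scheme of the
pmap_index() macro removed above:

/*
 * Hypothetical sketch, not part of this change.  User addresses don't
 * have the bits set that XKSEG has, so index the page table by
 * clearing those bits and taking the page number, as pmap_index() did.
 */
pt_entry_t *
tlb_pte_find(pt_entry_t *ptes, vm_offset_t va)
{

	return (&ptes[((va) & ~VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT]);
}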
 
 
@@ -292,36 +280,23 @@
  *	Bootstrap the system enough to run with virtual memory.
  *
  *	This sets up the ASID generator, message buffer, and page
- *	table.  XXX Probably want to move page table and related to
- *	a TLB-specific file.  It also sets up some very important
- *	values for MI VM code to run.
+ *	table.
  */
 void
 pmap_bootstrap(void)
 {
-	pt_entry_t *pte;
 	int i;
 
 	/*
-	 * Setup ASIDs. PCPU_GET(next_asid) and PCPU_GET(current_asidgen) are set
-	 * up already.
-	 */
-	pmap_maxasid = MIPS3_TLB_NUM_ASIDS;
-
-	/*
 	 * Steal the message buffer from the beginning of memory.
 	 */
 	msgbufp = (struct msgbuf *) pmap_steal_memory(MSGBUF_SIZE);
 	msgbufinit(msgbufp, MSGBUF_SIZE);
 
 	/*
-	 * Set up kernel page table.
+	 * Initialise TLB management, and have it allocate page tables.
 	 */
-	kptsize = physsz >> PAGE_SHIFT;
-	printf("Kernel page table indexes %ld %dK pages.\n",
-	    kptsize, PAGE_SIZE / 1024);
-	kptmap = (pt_entry_t *)
-	    pmap_steal_memory(kptsize * sizeof (pt_entry_t));
+	tlb_bootstrap(physsz >> PAGE_SHIFT, pmap_steal_memory);
 
 	avail_start = phys_avail[0];
 	for (i = 0; phys_avail[i+2]; i+= 2) ;
@@ -345,20 +320,6 @@
 	 */
 	LIST_INIT(&allpmaps);
 	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
-
-	/*
-	 * Lock in the current ASID, and set the global bit on each PTE.
-	 */
-	mips_wr_entryhi(kernel_pmap->pm_asid);
-	for (i = 0; i < kptsize; i++) {
-		pte = &kptmap[i];
-		*pte = PG_G;
-	}
-
-	/*
-	 * Clear the TLB.
-	 */
-	MIPS_TBIAP();
 }
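
tlb_bootstrap() is new in tlb.c and not shown here.  A sketch under the
assumption that it simply absorbs the page table allocation and the
PG_G/TLB-flush initialisation deleted from pmap_bootstrap(); the
mips_wr_entryhi() ASID lock-in would still have to happen once
kernel_pmap is set up:

/*
 * Hypothetical sketch; kptmap, kptsize, and pmap_maxasid presumably
 * move into tlb.c along with this.
 */
void
tlb_bootstrap(vm_size_t pages, vm_offset_t (*steal)(vm_size_t))
{
	vm_size_t i;

	pmap_maxasid = MIPS3_TLB_NUM_ASIDS;
	kptsize = pages;
	kptmap = (pt_entry_t *)(*steal)(kptsize * sizeof (pt_entry_t));
	/*
	 * Set the global bit on each kernel PTE so it matches under
	 * any ASID, then clear the TLB.
	 */
	for (i = 0; i < kptsize; i++)
		kptmap[i] = PG_G;
	MIPS_TBIAP();
}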
 
 /*
@@ -616,48 +577,35 @@
  ***************************************************/
 
 /*
- * Add a list of wired pages to the kva
- * this routine is only used for temporary
- * kernel mappings that do not need to have
- * page modification or references recorded.
- * Note that old mappings are simply written
- * over.  The page *must* be wired.
+ * Map a list of wired pages into kernel virtual address space.  This is
+ * intended for temporary mappings which do not need page modification or
+ * references recorded.  Existing mappings in the region are overwritten.
  */
 void
-pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
+pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
-	int i;
-	pt_entry_t *pte;
+	vm_offset_t va;
 
-	for (i = 0; i < count; i++) {
-		vm_offset_t tva = va + i * PAGE_SIZE;
-		pt_entry_t npte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m[i]))
-			| PG_V;
-		pt_entry_t opte;
-		pte = pmap_pte(kernel_pmap, tva);
-		opte = *pte;
-		*pte = npte;
-		if (opte & PG_V)
-			pmap_invalidate_page(kernel_pmap, tva);
+	va = sva;
+	while (count-- > 0) {
+		pmap_kenter(va, *m);
+		va += PAGE_SIZE;
+		m++;
 	}
 }
 
 /*
- * this routine jerks page mappings from the
- * kernel -- it is meant only for temporary mappings.
+ * Remove page mappings from kernel virtual address space.  Intended for
+ * temporary mappings entered by pmap_qenter.
  */
 void
-pmap_qremove(va, count)
+pmap_qremove(vm_offset_t sva, int count)
+{
 	vm_offset_t va;
-	int count;
-{
-	int i;
-	register pt_entry_t *pte;
 
-	for (i = 0; i < count; i++) {
-		pte = pmap_pte(kernel_pmap, va);
-		*pte = 0;
-		pmap_invalidate_page(kernel_pmap, va);
+	va = sva;
+	while (count-- > 0) {
+		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
 }
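
A short usage sketch of the rewritten pair (kva, pages, and npages are
hypothetical): map the wired pages, use them, then tear the mappings
down.

	pmap_qenter(kva, pages, npages);
	/* ... access the pages through kva ... */
	pmap_qremove(kva, npages);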
@@ -670,16 +618,8 @@
 PMAP_INLINE void 
 pmap_kenter(vm_offset_t va, vm_offset_t pa)
 {
-	pt_entry_t *pte;
-	pt_entry_t npte, opte;
 
-	npte = pmap_phys_to_pte(pa) | PG_V;
-	pte = pmap_pte(kernel_pmap, va);
-	opte = *pte;
-	*pte = npte;
-	if (opte & PG_V)
-		pmap_invalidate_page(kernel_pmap, va);
-	MachTLBUpdate(va & ~PAGE_MASK, npte);
+	tlb_enter(kernel_pmap, va, pa, PG_V | PG_W);
 }
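
tlb_enter() is also new in tlb.c.  A sketch of it, assuming it carries
over the semantics of the pmap_kenter() body deleted above (build the
PTE, store it, invalidate any old valid mapping, update the TLB):

/*
 * Hypothetical sketch; mirrors the deleted pmap_kenter() logic.
 */
void
tlb_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, pt_entry_t bits)
{
	pt_entry_t *pte, npte, opte;

	npte = MIPS_PA_TO_PFN(pa) | bits;
	pte = tlb_pte_find(pmap->pm_lev1, va);
	opte = *pte;
	*pte = npte;
	if (opte & PG_V)
		pmap_invalidate_page(pmap, va);
	MachTLBUpdate(va & ~PAGE_MASK, npte);
}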
 
 /*
@@ -688,11 +628,8 @@
 PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
-	register pt_entry_t *pte;
 
-	pte = pmap_pte(kernel_pmap, va);
-	*pte = 0;
-	pmap_invalidate_page(kernel_pmap, va);
+	tlb_remove(kernel_pmap, va);
 }
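
Likewise tlb_remove(); a sketch assuming it is just the deleted
pmap_kremove() body, generalised to take a pmap:

/*
 * Hypothetical sketch; the deleted pmap_kremove() logic.
 */
void
tlb_remove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = tlb_pte_find(pmap->pm_lev1, va);
	*pte = 0;
	pmap_invalidate_page(pmap, va);
}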
 
 /*
@@ -813,18 +750,16 @@
 	vm_object_t ksobj;
 	vm_offset_t ks;
 	vm_page_t m;
-	pt_entry_t *ptek;
 
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
-	ptek = pmap_pte(kernel_pmap, ks);
+
+	tlb_remove_pages(kernel_pmap, ks, pages);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_thread: kstack already missing?");
-		ptek[i] = 0;
-		pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
 		vm_page_lock_queues();
 		vm_page_busy(m);
 		vm_page_unwire(m, 0);
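
tlb_remove_pages() presumably replaces the per-page loop deleted here;
a sketch under that assumption:

/*
 * Hypothetical sketch; a simple loop over tlb_remove().
 */
void
tlb_remove_pages(pmap_t pmap, vm_offset_t sva, int pages)
{
	int i;

	for (i = 0; i < pages; i++)
		tlb_remove(pmap, sva + i * PAGE_SIZE);
}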
@@ -1217,31 +1152,6 @@
 }
 
 /*
- * Remove a single page from a process address space
- */
-static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va)
-{
-	register pt_entry_t *ptq;
-
-	ptq = pmap_pte(pmap, va);
-	
-	/*
-	 * if there is no pte for this address, just skip it!!!
-	 */
-	if (!ptq || !pmap_pte_v(ptq))
-		return;
-
-	/*
-	 * get a local va for mappings for this pmap.
-	 */
-	(void) pmap_remove_pte(pmap, ptq, va);
-	pmap_invalidate_page(pmap, va);
-
-	return;
-}
-
-/*
  *	Remove the given range of addresses from the specified map.
  *
  *	It is assumed that the start and end are properly
@@ -1250,7 +1160,6 @@
 void
 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	vm_offset_t va, nva;
 
 	if (pmap == NULL)
 		return;
@@ -1258,20 +1167,7 @@
 	if (pmap->pm_stats.resident_count == 0)
 		return;
 
-	/*
-	 * special handling of removing one page.  a very
-	 * common operation and easy to short circuit some
-	 * code.
-	 */
-	if (sva + PAGE_SIZE == eva) {
-		pmap_remove_page(pmap, sva);
-		return;
-	}
-
-	for (va = sva; va < eva; va = nva) {
-		pmap_remove_page(pmap, va);
-		nva = va + PAGE_SIZE;
-	}
+	tlb_remove_range(pmap, sva, eva);
 }
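
tlb_remove_range() takes over from the pmap_remove_page() loop removed
above.  A sketch that walks the range a page at a time and skips holes,
as the deleted code did; note it drops the pmap_remove_pte() pv-entry
bookkeeping, which matches the "cripple a few things" caveat in the
change description:

/*
 * Hypothetical sketch; the deleted loop, minus pv bookkeeping.
 */
void
tlb_remove_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	pt_entry_t *pte;
	vm_offset_t va;

	for (va = sva; va < eva; va += PAGE_SIZE) {
		pte = tlb_pte_find(pmap->pm_lev1, va);
		if (pte == NULL || (*pte & PG_V) == 0)
			continue;
		*pte = 0;
		pmap_invalidate_page(pmap, va);
	}
}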
 
 /*
@@ -1371,7 +1267,7 @@
 
 	while (sva < eva) {
 		/* 
-		 * If level pte is invalid, skip this page
+		 * If pte is invalid, skip this page
 		 */
 		pte = pmap_pte(pmap, sva);
 		if (!pmap_pte_v(pte)) {
@@ -1421,9 +1317,7 @@
 	vm_offset_t pa;
 	pt_entry_t *pte;
 	vm_offset_t opa;
-	pt_entry_t origpte, newpte;
 	vm_page_t mpte;
-	int managed;
 
 	if (pmap == NULL)
 		return;
@@ -1452,59 +1346,38 @@
 		panic("pmap_enter: invalid kernel page tables pmap=%p, va=0x%lx\n", pmap, va);
 	}
 
-	origpte = *pte;
 	pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
-	managed = 0;
-	opa = pmap_pte_pa(pte);
 
-	/*
-	 * Mapping has not changed, must be protection or wiring change.
-	 */
-	if (origpte & PG_V && (opa == pa)) {
+	if (pte_valid(pte) && (opa = MIPS_PTE_TO_PA(*pte)) == pa) {
+		if (pte_wired(pte)) {
+			if (!wired)
+				pmap->pm_stats.wired_count--;
+		} else {
+			if (wired)
+				pmap->pm_stats.wired_count++;
+		}
+		if (pte_dirty(pte)) {
+			/*
+			 * Managed, writeable, and modified; record it.
+			 */
+			if (pte_managed(pte) && !pte_ro(pte)) {
+				KASSERT(pmap_track_modified(va), ("pmap modified"));
+				vm_page_dirty(PHYS_TO_VM_PAGE(opa));
+			}
+		}
+	} else {
 		/*
-		 * Wiring change, just update stats. We don't worry about
-		 * wiring PT pages as they remain resident as long as there
-		 * are valid mappings in them. Hence, if a user page is wired,
-		 * the PT page will be also.
+		 * The old mapping is valid but for a different page; remove it.
 		 */
-		if (wired && ((origpte & PG_W) == 0))
-			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PG_W))
-			pmap->pm_stats.wired_count--;
+		if (pte_valid(pte)) {
+			int err;
 
-		/*
-		 * Remove extra pte reference
-		 */
-		if (mpte)
-			mpte->hold_count--;
-
-		/*
-		 * We might be turning off write access to the page,
-		 * so we go ahead and sense modify status.
-		 */
-		if (pmap_pte_managed(&origpte)) {
-			if ((origpte & PG_RO) != PG_RO
-			    && pmap_track_modified(va)) {
-				vm_page_t om;
-				om = PHYS_TO_VM_PAGE(opa);
-				vm_page_dirty(om);
-			}
-			managed = 1;
+			vm_page_lock_queues();
+			err = pmap_remove_pte(pmap, pte, va);
+			vm_page_unlock_queues();
+			if (err)
+				panic("pmap_enter: pte vanished, va: 0x%lx", va);
 		}
-
-		goto validate;
-	} 
-	/*
-	 * Mapping has changed, invalidate old range and fall through to
-	 * handle validating new mapping.
-	 */
-	if (opa) {
-		int err;
-		vm_page_lock_queues();
-		err = pmap_remove_pte(pmap, pte, va);
-		vm_page_unlock_queues();
-		if (err)
-			panic("pmap_enter: pte vanished, va: 0x%lx", va);
 	}
 
 	/*
@@ -1514,7 +1387,6 @@
 	 */
 	if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
 		pmap_insert_entry(pmap, va, mpte, m);
-		managed |= PG_M;
 	}
 
 	/*
@@ -1524,39 +1396,7 @@
 	if (wired)
 		pmap->pm_stats.wired_count++;
 
-validate:
-	/*
-	 * Now validate mapping with desired protection/wiring.
-	 */
-	newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed;
-
-	if (managed) {
-		/*
-		 * Set up referenced/modified emulation for the new
-		 * mapping. Any old referenced/modified emulation
-		 * results for the old mapping will have been recorded
-		 * either in pmap_remove_pte() or above in the code
-		 * which handles protection and/or wiring changes.
-		 */
-		newpte |= (PG_RO | PG_D);
-	}
-
-	if (wired)
-		newpte |= PG_W;
-
-	/*
-	 * if the mapping or permission bits are different, we need
-	 * to update the pte.
-	 */
-	if (origpte != newpte) {
-		if ((newpte & PG_V) == 0)
-			panic("pmap_enter invalid mapping?\n");
-		*pte = newpte;
-		if (origpte & PG_V)
-			pmap_invalidate_page(pmap, va);
-		if (prot & VM_PROT_EXECUTE)
-			/* XXX invalidate Icache */;
-	}
+	tlb_enter(pmap, va, pa, PG_V | (wired ? PG_W : 0));
 }
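
The pte_valid()/pte_wired()/pte_dirty()/pte_ro()/pte_managed()
predicates used above come from <machine/tlb.h> and are not shown.
Sketches assuming they simply test the PG_* bits the deleted code
tested directly (PG_M as the managed bit is a guess, based on the
removed "managed |= PG_M"):

/*
 * Hypothetical sketches of the <machine/tlb.h> predicates.
 */
#define	pte_valid(pte)		((*(pte) & PG_V) != 0)
#define	pte_wired(pte)		((*(pte) & PG_W) != 0)
#define	pte_dirty(pte)		((*(pte) & PG_D) != 0)
#define	pte_ro(pte)		((*(pte) & PG_RO) != 0)
#define	pte_managed(pte)	((*(pte) & PG_M) != 0)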
 
 /*
@@ -1617,16 +1457,17 @@
 
 	pte = pmap_pte(pmap, va);
 
-	if (wired && !pmap_pte_w(pte))
-		pmap->pm_stats.wired_count++;
-	else if (!wired && pmap_pte_w(pte))
-		pmap->pm_stats.wired_count--;
-
-	/*
-	 * Wiring is not a hardware characteristic so there is no need to
-	 * invalidate TLB.
-	 */
-	pmap_pte_set_w(pte, wired);
+	if (pte_wired(pte)) {
+		if (!wired)
+			pmap->pm_stats.wired_count--;
+	} else {
+		if (wired)
+			pmap->pm_stats.wired_count++;
+	}
+	if (wired)
+		pte_wire(pte);
+	else
+		pte_unwire(pte);
 }
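
pte_wire() and pte_unwire() replace the pmap_pte_set_w() macro removed
at the top of the diff; sketches under that assumption:

/*
 * Hypothetical sketches; the two halves of pmap_pte_set_w().
 */
#define	pte_wire(pte)	(*(pte) |= PG_W)
#define	pte_unwire(pte)	(*(pte) &= ~PG_W)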
 
 
@@ -2146,43 +1987,4 @@
 	return addr;
 }
 
-/*
- * PTE was not dirty and is being written to.  XXX kernel only for now.
- */
-void
-pmap_tlb_modified(void *badvaddr)
-{
-	pt_entry_t *pte, entry;
-	vm_offset_t va;
 
-	va = (vm_offset_t) badvaddr;
-	pte = pmap_pte(kernel_pmap, va);
-	/*
-	 * Do we really want to dirty this page?
-	 */
-#if 0 /* XXX ? */
-	if (!pmap_pte_managed(pte))
-		panic("tlb modified unmanaged page");
-#endif
-	if (!pmap_pte_v(pte))
-		panic("tlb modified invalid page");
-	if (pmap_pte_ro(pte))
-		panic("write to ro page");
-	if (*pte & PG_D)
-		panic("dirty page caused a TLBMod");
-	/*
-	 * Mark the page dirty.
-	 */
-	*pte |= PG_D;
-
-	/*
-	 * Make a PTE purely to insert into the TLB.
-	 */
-	entry = *pte & MIPS_PFN_MASK;
-	entry |= *pte & 0x07; /* XXX PG_??? */
-
-	/*
-	 * Lock in the TLB entry for this page.
-	 */
-	MachTLBUpdate(va & ~PAGE_MASK, *pte);
-}
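
pmap_tlb_modified() is deleted outright; presumably the TLBMod handler
reappears in tlb.c.  A sketch of such a replacement, keeping the
deleted logic (the name tlb_modified is a guess):

/*
 * Hypothetical sketch; PTE was not dirty and is being written to.
 * XXX kernel only, as before.
 */
void
tlb_modified(void *badvaddr)
{
	pt_entry_t *pte;
	vm_offset_t va;

	va = (vm_offset_t)badvaddr;
	pte = tlb_pte_find(kernel_pmap->pm_lev1, va);
	if ((*pte & PG_V) == 0)
		panic("tlb modified invalid page");
	if (*pte & PG_RO)
		panic("write to ro page");
	if (*pte & PG_D)
		panic("dirty page caused a TLBMod");
	/*
	 * Mark the page dirty and lock the entry into the TLB.
	 */
	*pte |= PG_D;
	MachTLBUpdate(va & ~PAGE_MASK, *pte);
}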


