Date:      Sat, 10 Aug 2002 00:43:12 -0700 (PDT)
From:      Peter Wemm <peter@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 15776 for review
Message-ID:  <200208100743.g7A7hCdC074176@freefall.freebsd.org>

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=15776

Change 15776 by peter@peter_overcee on 2002/08/10 00:42:29

	Checkpoint some WIP before I lose it again.  GRRRR.

Affected files ...

.. //depot/projects/pmap/sys/i386/i386/pmap.c#31 edit
.. //depot/projects/pmap/sys/i386/include/pmap.h#13 edit
.. //depot/projects/pmap/sys/i386/include/types.h#4 edit
.. //depot/projects/pmap/sys/vm/pmap.h#4 edit

Differences ...

==== //depot/projects/pmap/sys/i386/i386/pmap.c#31 (text+ko) ====

@@ -127,28 +127,6 @@
 #define PMAP_INLINE
 #endif
 
-/*
- * Get PDEs and PTEs for user/kernel address space
- */
-#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
-#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
-
-#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
-#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
-#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
-#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
-#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
-
-#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
-#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
-
-/*
- * Given a map and a machine independent protection code,
- * convert to a vax protection code.
- */
-#define pte_prot(m, p)	(protection_codes[p])
-static int protection_codes[8];
-
 struct pmap kernel_pmap_store;
 LIST_HEAD(pmaplist, pmap);
 struct pmaplist allpmaps;
@@ -200,16 +178,14 @@
 static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
 static pt_entry_t *get_ptbase(pmap_t pmap);
 static pv_entry_t get_pv_entry(void);
-static void	i386_protection_init(void);
 static __inline void	pmap_changebit(vm_page_t m, int bit, boolean_t setem);
 
 static void	pmap_remove_all(vm_page_t m);
 static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va,
 				      vm_page_t m, vm_page_t mpte);
 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
-static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
-static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
-					vm_offset_t va);
+static void pmap_remove_page(pmap_t pmap, vm_offset_t va);
+static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
 static boolean_t pmap_testbit(vm_page_t m, int bit);
 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
 		vm_page_t mpte, vm_page_t m);
@@ -226,7 +202,139 @@
 
 static pd_entry_t pdir4mb;
 
+pt_entry_t phys_to_pte(vm_physaddr_t pa);
+
+pt_entry_t pmap_pte_set(pt_entry_t *ptep, pt_entry_t newval);
+pt_entry_t pmap_pte_get(pt_entry_t *ptep);
+pt_entry_t pmap_pte_setbits(pt_entry_t *ptep, u_int32_t newbits);
+u_int32_t pmap_pte_getbits(pt_entry_t *ptep);
+pt_entry_t pmap_pte_clearbits(pt_entry_t *ptep, u_int32_t clearbits);
+boolean_t pmap_pte_testbits(pt_entry_t *ptep, u_int32_t testbits);
+
+pd_entry_t pmap_pde_set(pd_entry_t *pdep, pd_entry_t newval);
+pd_entry_t pmap_pde_get(pd_entry_t *pdep);
+pd_entry_t pmap_pde_setbits(pd_entry_t *pdep, u_int32_t newbits);
+u_int32_t pmap_pde_getbits(pd_entry_t *pdep);
+pd_entry_t pmap_pde_clearbits(pd_entry_t *pdep, u_int32_t clearbits);
+boolean_t pmap_pde_testbits(pd_entry_t *pdep, u_int32_t testbits);
+
+#ifndef PAE
+
+PMAP_INLINE pt_entry_t
+phys_to_pte(vm_physaddr_t pa)
+{
+	pt_entry_t newpte;
+
+	newpte.pte = pa;
+	return newpte;
+}
+
+PMAP_INLINE pt_entry_t
+pmap_pte_set(pt_entry_t *ptep, pt_entry_t newval)
+{
+	pt_entry_t oldval;
+
+	oldval = *ptep;
+	*ptep = newval;
+	return oldval;
+}
+
+PMAP_INLINE pt_entry_t
+pmap_pte_get(pt_entry_t *ptep)
+{
+
+	return *ptep;
+}
+
+PMAP_INLINE pt_entry_t
+pmap_pte_setbits(pt_entry_t *ptep, u_int32_t newbits)
+{
+	pt_entry_t oldval;
+
+	oldval = *ptep;
+	ptep->pte |= newbits;
+	return oldval;
+}
+
+PMAP_INLINE u_int32_t
+pmap_pte_getbits(pt_entry_t *ptep)
+{
+
+	return ptep->pte;
+}
+
+PMAP_INLINE pt_entry_t
+pmap_pte_clearbits(pt_entry_t *ptep, u_int32_t clearbits)
+{
+	pt_entry_t oldval;
+
+	oldval = *ptep;
+	ptep->pte &= ~clearbits;
+	return oldval;
+}
+
+PMAP_INLINE boolean_t
+pmap_pte_testbits(pt_entry_t *ptep, u_int32_t testbits)
+{
+
+	return (ptep->pte & testbits) ? 1 : 0;
+}
+
+#else
+
+PMAP_INLINE pt_entry_t
+phys_to_pte(vm_physaddr_t pa)
+{
+
+	return (pt_entry_t)pa;
+}
+
 /*
+ * CMPXCHG8B
+ * This is a larger and more unwieldy version of CMPXCHG: it compares
+ * the 64-bit (eight-byte) value stored at [mem] with the value in EDX:EAX.
+ * If they are equal, it sets the zero flag and stores ECX:EBX into the
+ * memory area. If they are unequal, it clears the zero flag and leaves
+ * the memory area untouched.
+ */
+PMAP_INLINE pt_entry_t
+pmap_ptep_set(pt_entry_t *ptep, pt_entry_t newval)
+{
+	pt_entry_t old;
+	u_int32_t newlo;
+	u_int32_t newhi;
+
+	newlo = newval;
+	newhi = newval >> 32;
+	__asm __volatile(
+	    "1: movl (%1), %%eax\n\t"
+	    "movl 4(%1), %%edx\n\t"
+	    "cmpxchg8b (%1)\n\t"
+	    "jnz 1b"
+	    : "=A,A"(old) : "D,S"(ptep), "b,b"(newlo), "c,c"(newhi) : "memory");
+	return old;
+}
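
(Aside: a minimal, non-atomic C sketch of what the cmpxchg8b loop above
accomplishes -- an atomic 64-bit exchange of a PTE -- is shown below.  It is
illustrative only and not part of this change; the inline assembly is needed
because i386 cannot load or store a 64-bit PTE in a single instruction.

	static pt_entry_t
	pte_swap_sketch(pt_entry_t *ptep, pt_entry_t newval)
	{
		pt_entry_t old;

		for (;;) {
			old = *ptep;		/* movl (%1),%%eax; movl 4(%1),%%edx */
			/*
			 * cmpxchg8b performs the compare and the conditional
			 * store below as one atomic operation; this plain C
			 * version does not, so it only sketches the logic.
			 */
			if (*ptep == old) {
				*ptep = newval;	/* store ECX:EBX on a match */
				break;
			}
		}
		return old;
	}
)
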
+
+PMAP_INLINE pt_entry_t
+pmap_ptep_get(pt_entry_t *ptep)
+{
+
+	return *ptep;
+}
+
+#endif
+
+/*
+ * Get PDEs for user/kernel address space
+ */
+static __inline pd_entry_t *
+pmap_pde(pmap_t pmap, vm_offset_t va)
+{
+
+	return &(pmap->pm_pdir[va >> PDRSHIFT]);
+}
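
(For concreteness, a worked sketch of the index arithmetic, assuming the
non-PAE i386 values where PDRSHIFT is 22 -- one PDE per 4 MB -- and the
conventional KERNBASE of 0xC0000000:

	vm_offset_t va = 0xC0400000;			/* KERNBASE + 4 MB */
	pd_entry_t *pde = pmap_pde(kernel_pmap, va);
	/* same as &kernel_pmap->pm_pdir[769], since 0xC0400000 >> 22 == 769 */

Under PAE the shift and the directory layout differ.)
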
+
+/*
  *	Routine:	pmap_pte
  *	Function:
  *		Extract the page table entry associated
@@ -234,21 +342,20 @@
  */
 
 PMAP_INLINE pt_entry_t *
-pmap_pte(pmap, va)
-	register pmap_t pmap;
-	vm_offset_t va;
+pmap_pte(pmap_t pmap, vm_offset_t va)
 {
 	pd_entry_t *pdeaddr;
+	u_int32_t pdebits;
 
-	if (pmap) {
-		pdeaddr = pmap_pde(pmap, va);
-		if (*pdeaddr & PG_PS)
-			return pdeaddr;
-		if (*pdeaddr) {
-			return get_ptbase(pmap) + i386_btop(va);
-		}
-	}
-	return (0);
+	if (pmap == NULL)
+		return 0;
+	pdeaddr = pmap_pde(pmap, va);
+	pdebits = pmap_pde_getbits(pdeaddr);
+	if (pdebits & PG_PS)	/* 4MB page, special case */
+		return (pt_entry_t *)pdeaddr;
+	if (pdebits & PG_V)
+		return get_ptbase(pmap) + i386_btop(va);
+	return 0;
 }
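
(Usage sketch: a hypothetical helper, not part of this change, showing how the
rewritten pmap_pte() combines with the new accessor functions; the 4MB-page
case is ignored for brevity:

	static __inline boolean_t
	pmap_va_is_writable(pmap_t pmap, vm_offset_t va)
	{
		pt_entry_t *ptep;

		ptep = pmap_pte(pmap, va);
		if (ptep == NULL)
			return FALSE;
		/* the mapping must be both valid and writable */
		return ((pmap_pte_getbits(ptep) & (PG_V | PG_RW)) ==
		    (PG_V | PG_RW));
	}
)
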
 
 /*
@@ -280,9 +387,7 @@
  *	(physical) address starting relative to 0]
  */
 void
-pmap_bootstrap(firstaddr, loadaddr)
-	vm_offset_t firstaddr;
-	vm_offset_t loadaddr;
+pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t loadaddr)
 {
 	vm_offset_t va;
 	pt_entry_t *pte;
@@ -304,11 +409,6 @@
 	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	/*
-	 * Initialize protection array.
-	 */
-	i386_protection_init();
-
-	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
@@ -361,9 +461,9 @@
 
 	virtual_avail = va;
 
-	*CMAP1 = *CMAP2 = 0;
+	CMAP1->pte = CMAP2->pte = 0;
 	for (i = 0; i < NKPT; i++)
-		PTD[i] = 0;
+		PTD[i].pde = 0;
 
 	pgeflag = 0;
 #ifndef DISABLE_PG_G
@@ -379,7 +479,7 @@
  * The 4MB page version of the initial
  * kernel page mapping.
  */
-	pdir4mb = 0;
+	pdir4mb.pde = 0;
 
 #ifndef DISABLE_PSE
 	if (cpu_feature & CPUID_PSE) {
@@ -388,9 +488,10 @@
 		 * Note that we have enabled PSE mode
 		 */
 		pseflag = PG_PS;
-		ptditmp = *(PTmap + i386_btop(KERNBASE));
-		ptditmp &= ~(NBPDR - 1);
-		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
+		/* XXXPAE FIXME */
+		ptditmp.pde = PTmap[i386_btop(KERNBASE)].pte;
+		ptditmp.pde &= ~(NBPDR - 1);
+		ptditmp.pde |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
 		pdir4mb = ptditmp;
 	}
 #endif
@@ -435,7 +536,7 @@
 #endif
 	if (PCPU_GET(cpuid) == 0) {
 #ifndef DISABLE_PSE
-		if (pdir4mb) {
+		if (pdir4mb.pde) {
 			kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
 			invltlb();	/* Insurance */
 		}
@@ -452,8 +553,8 @@
 			endva = KERNBASE + KERNend;
 			while (va < endva) {
 				pte = vtopte(va);
-				if (*pte)
-					*pte |= pgeflag;
+				if (pmap_pte_get(pte).pte)
+					pmap_pte_setbits(pte, pgeflag);
 				va += PAGE_SIZE;
 			}
 			invltlb();	/* Insurance */
@@ -469,6 +570,7 @@
 void *
 pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
+
 	*flags = UMA_SLAB_PRIV;
 	return (void *)kmem_alloc(kernel_map, bytes);
 }
@@ -481,8 +583,7 @@
  *	way, discontiguous physical memory.
  */
 void
-pmap_init(phys_start, phys_end)
-	vm_offset_t phys_start, phys_end;
+pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
 {
 	int i;
 	int initial_pvs;
@@ -528,7 +629,7 @@
  * numbers of pv entries.
  */
 void
-pmap_init2()
+pmap_init2(void)
 {
 	int shpgperproc = PMAP_SHPGPERPROC;
 
@@ -553,11 +654,8 @@
 static int
 pmap_nw_modified(pt_entry_t ptea)
 {
-	int pte;
 
-	pte = (int) ptea;
-
-	if ((pte & (PG_M|PG_RW)) == PG_M)
+	if ((ptea & (PG_M|PG_RW)) == PG_M)
 		return 1;
 	else
 		return 0;
@@ -572,6 +670,7 @@
 static PMAP_INLINE int
 pmap_track_modified(vm_offset_t va)
 {
+
 	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva)) 
 		return 1;
 	else
@@ -735,8 +834,7 @@
  * It will map in the alternate PTE space if needed.
  */
 static pt_entry_t *
-get_ptbase(pmap)
-	pmap_t pmap;
+get_ptbase(pmap_t pmap)
 {
 	pd_entry_t frame;
 
@@ -763,9 +861,7 @@
  */
 
 static pt_entry_t * 
-pmap_pte_quick(pmap, va)
-	register pmap_t pmap;
-	vm_offset_t va;
+pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 {
 	pd_entry_t pde, newpf;
 	pde = pmap->pm_pdir[va >> PDRSHIFT];
@@ -792,9 +888,7 @@
  *		with the given map/virtual_address pair.
  */
 vm_offset_t 
-pmap_extract(pmap, va)
-	register pmap_t pmap;
-	vm_offset_t va;
+pmap_extract(pmap_t pmap, vm_offset_t va)
 {
 	vm_offset_t rtval;	/* XXX FIXME */
 	vm_offset_t pdirindex;
@@ -990,8 +1084,7 @@
  * This routine directly impacts the exit perf of a process and thread.
  */
 void
-pmap_dispose_thread(td)
-	struct thread *td;
+pmap_dispose_thread(struct thread *td)
 {
 	int i;
 	vm_object_t ksobj;
@@ -1027,8 +1120,7 @@
  * Allow the Kernel stack for a thread to be prejudicially paged out.
  */
 void
-pmap_swapout_thread(td)
-	struct thread *td;
+pmap_swapout_thread(struct thread *td)
 {
 	int i;
 	vm_object_t ksobj;
@@ -1053,8 +1145,7 @@
  * Bring the kernel stack for a specified thread back in.
  */
 void
-pmap_swapin_thread(td)
-	struct thread *td;
+pmap_swapin_thread(struct thread *td)
 {
 	int i, rv;
 	vm_page_t ma[KSTACK_PAGES];
@@ -1169,8 +1260,7 @@
 }
 
 void
-pmap_pinit0(pmap)
-	struct pmap *pmap;
+pmap_pinit0(pmap_t pmap)
 {
 	pmap->pm_pdir =
 		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
@@ -1192,8 +1282,7 @@
  * such as one in a vmspace structure.
  */
 void
-pmap_pinit(pmap)
-	register struct pmap *pmap;
+pmap_pinit(pmap_t pmap)
 {
 	vm_page_t ptdpg;
 
@@ -1220,8 +1309,20 @@
 	ptdpg->valid = VM_PAGE_BITS_ALL;
 
 	pmap_qenter((vm_offset_t) pmap->pm_pdir, &ptdpg, 1);
-	if ((ptdpg->flags & PG_ZERO) == 0)
+	if ((ptdpg->flags & PG_ZERO) == 0) {
+#if defined(I686_CPU)
+#if defined(I486_CPU) || defined(I586_CPU)
+		if (cpu_class == CPUCLASS_686)
+			i686_pagezero(pmap->pm_pdir);
+		else
+			bzero(pmap->pm_pdir, PAGE_SIZE);
+#else
+		i686_pagezero(pmap->pm_pdir);
+#endif
+#else
 		bzero(pmap->pm_pdir, PAGE_SIZE);
+#endif
+	}
 
 	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 	/* Wire in kernel global address entries. */
@@ -1248,8 +1349,7 @@
  * but before this pmap is activated.
  */
 void
-pmap_pinit2(pmap)
-	struct pmap *pmap;
+pmap_pinit2(pmap_t pmap)
 {
 	/* XXX: Remove this stub when no longer called */
 }
@@ -1258,6 +1358,7 @@
 pmap_release_free_page(pmap_t pmap, vm_page_t p)
 {
 	pd_entry_t *pde = pmap->pm_pdir;
+
 	/*
 	 * This code optimizes the case of freeing non-busy
 	 * page-table pages.  Those pages are zero now, and
@@ -1306,9 +1407,7 @@
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap, ptepindex)
-	pmap_t	pmap;
-	unsigned ptepindex;
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
 {
 	vm_offset_t pteva, ptepa;	/* XXXPA */
 	vm_page_t m;
@@ -1352,7 +1451,18 @@
 		if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) ==
 		    (PTDpde & PG_FRAME)) {
 			pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex);
-			bzero((caddr_t) pteva, PAGE_SIZE);
+#if defined(I686_CPU)
+#if defined(I486_CPU) || defined(I586_CPU)
+			if (cpu_class == CPUCLASS_686)
+				i686_pagezero((void *)pteva);    
+			else
+				bzero((void *)pteva, PAGE_SIZE);
+#else
+			i686_pagezero((void *)pteva);    
+#endif
+#else
+			bzero((void *)pteva, PAGE_SIZE);
+#endif
 		} else {
 			pmap_zero_page(m);
 		}
@@ -1380,7 +1490,7 @@
 	/*
 	 * Get the page directory entry
 	 */
-	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
+	ptepa = pmap->pm_pdir[ptepindex];
 
 	/*
 	 * This supports switching from a 4MB page to a
@@ -1486,24 +1596,26 @@
 void
 pmap_growkernel(vm_offset_t addr)
 {
-	struct pmap *pmap;
+	pmap_t pmap;
 	int s;
 	vm_offset_t ptppaddr;
 	vm_page_t nkpg;
 	pd_entry_t newpdir;
+	u_int32_t pdirindex;
 
 	s = splhigh();
 	if (kernel_vm_end == 0) {
 		kernel_vm_end = KERNBASE;
 		nkpt = 0;
-		while (pdir_pde(PTD, kernel_vm_end)) {
+		while (PTD[kernel_vm_end >> PDRSHIFT]) {
 			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 			nkpt++;
 		}
 	}
 	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 	while (kernel_vm_end < addr) {
-		if (pdir_pde(PTD, kernel_vm_end)) {
+		pdirindex = kernel_vm_end >> PDRSHIFT;
+		if (PTD[pdirindex]) {
 			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 			continue;
 		}
@@ -1521,11 +1633,9 @@
 		pmap_zero_page(nkpg);
 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
-		pdir_pde(PTD, kernel_vm_end) = newpdir;
-
-		LIST_FOREACH(pmap, &allpmaps, pm_list) {
-			*pmap_pde(pmap, kernel_vm_end) = newpdir;
-		}
+		PTD[pdirindex] = newpdir;
+		LIST_FOREACH(pmap, &allpmaps, pm_list)
+			pmap->pm_pdir[pdirindex] = newpdir;
 		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 	}
 	splx(s);
@@ -1542,6 +1652,7 @@
 static PMAP_INLINE void
 free_pv_entry(pv_entry_t pv)
 {
+
 	pv_entry_count--;
 	uma_zfree(pvzone, pv);
 }
@@ -1555,10 +1666,11 @@
 static pv_entry_t
 get_pv_entry(void)
 {
+
 	pv_entry_count++;
 	if (pv_entry_high_water &&
-		(pv_entry_count > pv_entry_high_water) &&
-		(pmap_pagedaemon_waken == 0)) {
+	    (pv_entry_count > pv_entry_high_water) &&
+	    (pmap_pagedaemon_waken == 0)) {
 		pmap_pagedaemon_waken = 1;
 		wakeup (&vm_pages_needed);
 	}
@@ -1570,7 +1682,7 @@
  * in a pinch.
  */
 void
-pmap_collect()
+pmap_collect(void)
 {
 	int i;
 	vm_page_t m;
@@ -1672,7 +1784,7 @@
 	vm_page_t m;
 
 	oldpte = atomic_readandclear_int(ptq);
-	if (oldpte & PG_W)
+	if (oldpte & PG_WIRED)
 		pmap->pm_stats.wired_count -= 1;
 	/*
 	 * Machines that don't support invlpg, also don't support
@@ -1682,7 +1794,7 @@
 		pmap_invalidate_page(kernel_pmap, va);
 	pmap->pm_stats.resident_count -= 1;
 	if (oldpte & PG_MANAGED) {
-		m = PHYS_TO_VM_PAGE(oldpte);
+		m = PHYS_TO_VM_PAGE(pte_to_phys(oldpte));
 		if (oldpte & PG_M) {
 #if defined(PMAP_DIAGNOSTIC)
 			if (pmap_nw_modified((pt_entry_t) oldpte)) {
@@ -1710,7 +1822,7 @@
 static void
 pmap_remove_page(pmap_t pmap, vm_offset_t va)
 {
-	register pt_entry_t *ptq;
+	pt_entry_t *ptq;
 
 	/*
 	 * if there is no pte for this address, just skip it!!!
@@ -1739,7 +1851,7 @@
 void
 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-	register pt_entry_t *ptbase;
+	pt_entry_t *ptbase;
 	vm_offset_t pdnxt;
 	pd_entry_t ptpaddr;
 	vm_offset_t sindex, eindex;
@@ -1842,8 +1954,8 @@
 static void
 pmap_remove_all(vm_page_t m)
 {
-	register pv_entry_t pv;
-	pt_entry_t *pte, tpte;
+	pv_entry_t pv;
+	pt_entry_t *pte, oldpte;
 	int s;
 
 #if defined(PMAP_DIAGNOSTIC)
@@ -1862,22 +1974,22 @@
 
 		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 
-		tpte = atomic_readandclear_int(pte);
-		if (tpte & PG_W)
+		oldpte = atomic_readandclear_int(pte);
+		if (oldpte & PG_WIRED)
 			pv->pv_pmap->pm_stats.wired_count--;
 
-		if (tpte & PG_A)
+		if (oldpte & PG_A)
 			vm_page_flag_set(m, PG_REFERENCED);
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if (tpte & PG_M) {
+		if (oldpte & PG_M) {
 #if defined(PMAP_DIAGNOSTIC)
-			if (pmap_nw_modified((pt_entry_t) tpte)) {
+			if (pmap_nw_modified(oldpte)) {
 				printf(
 	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
-				    pv->pv_va, tpte);
+				    pv->pv_va, oldpte);
 			}
 #endif
 			if (pmap_track_modified(pv->pv_va))
@@ -1904,7 +2016,7 @@
 void
 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
-	register pt_entry_t *ptbase;
+	pt_entry_t *ptbase;
 	vm_offset_t pdnxt;
 	pd_entry_t ptpaddr;
 	vm_offset_t sindex, eindex;
@@ -1913,6 +2025,7 @@
 	if (pmap == NULL)
 		return;
 
+	/* Removing read access means unmap totally */
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
 		return;
@@ -1921,6 +2034,7 @@
 	if (prot & VM_PROT_WRITE)
 		return;
 
+	/* We are removing write access */
 	anychanged = 0;
 
 	ptbase = get_ptbase(pmap);
@@ -1950,9 +2064,8 @@
 		if (ptpaddr == 0)
 			continue;
 
-		if (pdnxt > eindex) {
+		if (pdnxt > eindex)
 			pdnxt = eindex;
-		}
 
 		for (; sindex != pdnxt; sindex++) {
 
@@ -1964,14 +2077,14 @@
 			if (pbits & PG_MANAGED) {
 				m = NULL;
 				if (pbits & PG_A) {
-					m = PHYS_TO_VM_PAGE(pbits);
+					m = PHYS_TO_VM_PAGE(pte_to_phys(pbits));
 					vm_page_flag_set(m, PG_REFERENCED);
 					pbits &= ~PG_A;
 				}
 				if (pbits & PG_M) {
 					if (pmap_track_modified(i386_ptob(sindex))) {
 						if (m == NULL)
-							m = PHYS_TO_VM_PAGE(pbits);
+							m = PHYS_TO_VM_PAGE(pte_to_phys(pbits));
 						vm_page_dirty(m);
 						pbits &= ~PG_M;
 					}
@@ -2007,10 +2120,11 @@
 	   boolean_t wired)
 {
 	vm_offset_t pa;
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 	vm_offset_t opa;
 	pt_entry_t origpte, newpte;
 	vm_page_t mpte;
+	u_int32_t bits;
 
 	if (pmap == NULL)
 		return;
@@ -2069,9 +2183,9 @@
 		 * are valid mappings in them. Hence, if a user page is wired,
 		 * the PT page will be also.
 		 */
-		if (wired && ((origpte & PG_W) == 0))
+		if (wired && ((origpte & PG_WIRED) == 0))
 			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PG_W))
+		else if (!wired && (origpte & PG_WIRED))
 			pmap->pm_stats.wired_count--;
 
 #if defined(PMAP_DIAGNOSTIC)
@@ -2101,12 +2215,8 @@
 		 * so we go ahead and sense modify status.
 		 */
 		if (origpte & PG_MANAGED) {
-			if ((origpte & PG_M) && pmap_track_modified(va)) {
-				vm_page_t om;
-				om = PHYS_TO_VM_PAGE(opa);
-				vm_page_dirty(om);
-			}
-			pa |= PG_MANAGED;
+			if ((origpte & PG_M) && pmap_track_modified(va))
+				vm_page_dirty(m);
 		}
 		goto validate;
 	} 
@@ -2127,10 +2237,8 @@
 	 * called at interrupt time.
 	 */
 	if (pmap_initialized && 
-	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
 		pmap_insert_entry(pmap, va, mpte, m);
-		pa |= PG_MANAGED;
-	}
 
 	/*
 	 * Increment counters
@@ -2143,24 +2251,28 @@
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
-
+	newpte = phys_to_pte(pa);
+	bits = PG_V;
+	if (prot & VM_PROT_WRITE)
+		bits |= PG_RW;
+	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
+		bits |= PG_MANAGED;
 	if (wired)
-		newpte |= PG_W;
+		bits |= PG_WIRED;
 	if (va < VM_MAXUSER_ADDRESS)
-		newpte |= PG_U;
+		bits |= PG_U;
 	if (pmap == kernel_pmap)
-		newpte |= pgeflag;
+		bits |= pgeflag;
+	pmap_pte_setbits(&newpte, bits);
 
 	/*
 	 * if the mapping or permission bits are different, we need
 	 * to update the pte.
 	 */
 	if ((origpte & ~(PG_M|PG_A)) != newpte) {
-		*pte = newpte | PG_A;
-		/*if (origpte)*/ {
-			pmap_invalidate_page(pmap, va);
-		}
+		pmap_pte_setbits(&newpte, PG_A);
+		pmap_pte_set(pte, newpte);
+		pmap_invalidate_page(pmap, va);
 	}
 }
 
@@ -2178,8 +2290,7 @@
 static vm_page_t
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-	pt_entry_t *pte;
-	vm_offset_t pa;
+	pt_entry_t *pte, newpte;
 
 	/*
 	 * In the case that a page table page is not
@@ -2210,7 +2321,7 @@
 				if (ptepa & PG_PS)
 					panic("pmap_enter_quick: unexpected mapping into 4MB page");
 				if (pmap->pm_ptphint &&
-					(pmap->pm_ptphint->pindex == ptepindex)) {
+				    (pmap->pm_ptphint->pindex == ptepindex)) {
 					mpte = pmap->pm_ptphint;
 				} else {
 					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
@@ -2253,15 +2364,14 @@
 	 */
 	pmap->pm_stats.resident_count++;
 
-	pa = VM_PAGE_TO_PHYS(m);
-
+	newpte = phys_to_pte(VM_PAGE_TO_PHYS(m));
+	pmap_pte_setbits(&newpte, PG_V | PG_U);
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
-		*pte = pa | PG_V | PG_U;
-	else
-		*pte = pa | PG_V | PG_U | PG_MANAGED;
+	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
+		pmap_pte_setbits(&newpte, PG_MANAGED);
+	pmap_pte_set(pte, newpte);
 
 	return mpte;
 }
@@ -2471,10 +2581,7 @@
 };
 
 void
-pmap_prefault(pmap, addra, entry)
-	pmap_t pmap;
-	vm_offset_t addra;
-	vm_map_entry_t entry;
+pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 {
 	int i;
 	vm_offset_t starta;
@@ -2557,28 +2664,26 @@
  *			The mapping must already exist in the pmap.
  */
 void
-pmap_change_wiring(pmap, va, wired)
-	register pmap_t pmap;
-	vm_offset_t va;
-	boolean_t wired;
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 
 	if (pmap == NULL)
 		return;
-
 	pte = pmap_pte(pmap, va);
-
-	if (wired && !pmap_pte_w(pte))
+	if (wired && !pmap_pte_testbits(pte, PG_WIRED))
 		pmap->pm_stats.wired_count++;
-	else if (!wired && pmap_pte_w(pte))
+	else if (!wired && pmap_pte_testbits(pte, PG_WIRED))
 		pmap->pm_stats.wired_count--;
 
 	/*
 	 * Wiring is not a hardware characteristic so there is no need to
 	 * invalidate TLB.
 	 */
-	pmap_pte_set_w(pte, wired);
+	if (wired)
+		pmap_pte_setbits(pte, PG_WIRED);
+	else
+		pmap_pte_clearbits(pte, PG_WIRED);
 }
 
 
@@ -2711,6 +2816,7 @@
 static void
 pmap_zpi_switchin12(void)
 {
+
 	invlpg((u_int)CADDR1);
 	invlpg((u_int)CADDR2);
 }
@@ -2718,12 +2824,14 @@
 static void
 pmap_zpi_switchin2(void)
 {
+
 	invlpg((u_int)CADDR2);
 }
 
 static void
 pmap_zpi_switchin3(void)
 {
+
 	invlpg((u_int)CADDR3);
 }
 
@@ -2751,11 +2859,17 @@
 	invlpg((u_int)CADDR2);
 #endif
 #if defined(I686_CPU)
+#if defined(I486_CPU) || defined(I586_CPU)
 	if (cpu_class == CPUCLASS_686)
 		i686_pagezero(CADDR2);
 	else
+		bzero(CADDR2, PAGE_SIZE);
+#else
+	i686_pagezero(CADDR2);
 #endif
-		bzero(CADDR2, PAGE_SIZE);
+#else
+	bzero(CADDR2, PAGE_SIZE);
+#endif
 #ifdef SMP
 	curthread->td_switchin = NULL;
 #endif
@@ -2786,11 +2900,20 @@
 	invlpg((u_int)CADDR2);
 #endif
 #if defined(I686_CPU)
+#if defined(I486_CPU) || defined(I586_CPU)
 	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
 		i686_pagezero(CADDR2);
 	else
+		bzero(CADDR2 + off, size);
+#else
+	if (off == 0 && size == PAGE_SIZE)
+		i686_pagezero(CADDR2);
+	else
+		bzero(CADDR2 + off, size);
 #endif
-		bzero((char *)CADDR2 + off, size);
+#else
+	bzero(CADDR2 + off, size);
+#endif
 #ifdef SMP
 	curthread->td_switchin = NULL;
 #endif
@@ -2821,11 +2944,17 @@
 	invlpg((u_int)CADDR3);
 #endif
 #if defined(I686_CPU)
+#if defined(I486_CPU) || defined(I586_CPU)
 	if (cpu_class == CPUCLASS_686)
 		i686_pagezero(CADDR3);
 	else
+		bzero(CADDR3, PAGE_SIZE);
+#else
+	i686_pagezero(CADDR3);
 #endif
-		bzero(CADDR3, PAGE_SIZE);
+#else
+	bzero(CADDR3, PAGE_SIZE);
+#endif
 #ifdef SMP
 	curthread->td_switchin = NULL;
 #endif
@@ -2881,10 +3010,7 @@
  *		down (or not) as appropriate.
  */
 void
-pmap_pageable(pmap, sva, eva, pageable)
-	pmap_t pmap;
-	vm_offset_t sva, eva;
-	boolean_t pageable;
+pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)

>>> TRUNCATED FOR MAIL (1000 lines) <<<
