Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 3 Apr 2010 07:12:04 +0000 (UTC)
From:      Juli Mallett <jmallett@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r206120 - in user/jmallett/octeon/sys: conf mips/include mips/mips
Message-ID:  <201004030712.o337C4Pk013459@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: jmallett
Date: Sat Apr  3 07:12:03 2010
New Revision: 206120
URL: http://svn.freebsd.org/changeset/base/206120

Log:
  o) Provide an interface to basic TLB operations one might want to do in C,
     using inline assembly.
  o) Remove duplication of various PTE bits by switching to my <machine/pte.h>
     and converting existing code to use it.
  o) Rename CLEAR_PTE_WIRED to CLEAR_PTE_SWBITS since it clears PG_W and PG_RO.
  o) Be consistent in how PTE bits are tested, set and cleared, except where
     there is a change beyond simply clearing or setting bits.
  o) Un-generalize a couple of pmap functions used in only one place.
  o) Provide a convenient helper macro for getting the ASID.
  o) Remove unused TLB access routines from cpufunc.h since they can live very
     comfortably in tlb.c now.

Added:
  user/jmallett/octeon/sys/mips/include/tlb.h
  user/jmallett/octeon/sys/mips/mips/tlb.c
Deleted:
  user/jmallett/octeon/sys/mips/mips/tlb.S
Modified:
  user/jmallett/octeon/sys/conf/files.mips
  user/jmallett/octeon/sys/mips/include/cpu.h
  user/jmallett/octeon/sys/mips/include/cpufunc.h
  user/jmallett/octeon/sys/mips/include/pmap.h
  user/jmallett/octeon/sys/mips/include/pte.h
  user/jmallett/octeon/sys/mips/include/vm.h
  user/jmallett/octeon/sys/mips/mips/cpu.c
  user/jmallett/octeon/sys/mips/mips/exception.S
  user/jmallett/octeon/sys/mips/mips/machdep.c
  user/jmallett/octeon/sys/mips/mips/mp_machdep.c
  user/jmallett/octeon/sys/mips/mips/pmap.c
  user/jmallett/octeon/sys/mips/mips/swtch.S
  user/jmallett/octeon/sys/mips/mips/trap.c
  user/jmallett/octeon/sys/mips/mips/vm_machdep.c

Modified: user/jmallett/octeon/sys/conf/files.mips
==============================================================================
--- user/jmallett/octeon/sys/conf/files.mips	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/conf/files.mips	Sat Apr  3 07:12:03 2010	(r206120)
@@ -44,7 +44,7 @@ mips/mips/vm_machdep.c		standard
 mips/mips/fp.S			standard
 mips/mips/pm_machdep.c		standard
 mips/mips/swtch.S		standard
-mips/mips/tlb.S			standard
+mips/mips/tlb.c			standard
 
 mips/mips/bus_space_generic.c 	standard
 mips/mips/busdma_machdep.c 	standard

Modified: user/jmallett/octeon/sys/mips/include/cpu.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/cpu.h	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/include/cpu.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -451,7 +451,6 @@ extern union cpuprid cpu_id;
 #if defined(_KERNEL) && !defined(_LOCORE)
 extern union cpuprid fpu_id;
 
-struct tlb;
 struct user;
 
 u_int32_t mips_cp0_config1_read(void);
@@ -466,12 +465,6 @@ void Mips_HitInvalidateDCache(vm_offset_
 void Mips_SyncICache(vm_offset_t, int);
 void Mips_InvalidateICache(vm_offset_t, int);
 
-void Mips_TLBFlush(int);
-void Mips_TLBFlushAddr(vm_offset_t);
-void Mips_TLBWriteIndexed(int, struct tlb *);
-void Mips_TLBUpdate(vm_offset_t, unsigned);
-void Mips_TLBRead(int, struct tlb *);
-void mips_TBIAP(int);
 void wbflush(void);
 
 extern u_int32_t cpu_counter_interval;	/* Number of counter ticks/tick */

Modified: user/jmallett/octeon/sys/mips/include/cpufunc.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/cpufunc.h	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/include/cpufunc.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -102,36 +102,6 @@ mips_write_membar(void)
 }
 
 #ifdef _KERNEL
-
-static __inline void
-mips_tlbp(void)
-{
-	__asm __volatile ("tlbp");
-	mips_barrier();
-}
-
-static __inline void
-mips_tlbr(void)
-{
-	__asm __volatile ("tlbr");
-	mips_barrier();
-}
-
-static __inline void
-mips_tlbwi(void)
-{
-	__asm __volatile ("tlbwi");
-	mips_barrier();
-}
-
-static __inline void
-mips_tlbwr(void)
-{
-	__asm __volatile ("tlbwr");
-	mips_barrier();
-}
-
-
 #if defined(__mips_n32) || defined(__mips_n64)
 #define	MIPS_RDRW64_COP0(n,r)					\
 static __inline uint64_t					\

Modified: user/jmallett/octeon/sys/mips/include/pmap.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/pmap.h	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/include/pmap.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -98,6 +98,7 @@ pt_entry_t *pmap_segmap(pmap_t pmap, vm_
 vm_offset_t pmap_kextract(vm_offset_t va);
 
 #define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
+#define	pmap_asid(pmap)	(pmap)->pm_asid[PCPU_GET(cpuid)].asid
 
 extern struct pmap	kernel_pmap_store;
 #define kernel_pmap	(&kernel_pmap_store)
@@ -210,11 +211,6 @@ pmap_map_fpage(vm_paddr_t pa, struct fpa
     boolean_t check_unmaped);
 void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);
 
-/*
- * Function to save TLB contents so that they may be inspected in the debugger.
- */
-extern void pmap_save_tlb(void);
-
 #endif				/* _KERNEL */
 
 #endif				/* !LOCORE */

Modified: user/jmallett/octeon/sys/mips/include/pte.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/pte.h	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/include/pte.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -1,13 +1,6 @@
-/*	$OpenBSD: pte.h,v 1.4 1998/01/28 13:46:25 pefo Exp $	*/
-
 /*-
- * Copyright (c) 1988 University of Utah.
- * Copyright (c) 1992, 1993
- *	The Regents of the University of California.  All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department and Ralph Campbell.
+ * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -17,18 +10,11 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *	This product includes software developed by the University of
- *	California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
  *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -37,117 +23,129 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	from: Utah Hdr: pte.h 1.11 89/09/03
- *	from: @(#)pte.h 8.1 (Berkeley) 6/10/93
- *	JNPR: pte.h,v 1.1.4.1 2007/09/10 06:20:19 girish
  * $FreeBSD$
  */
 
-#ifndef _MACHINE_PTE_H_
+#ifndef	_MACHINE_PTE_H_
 #define	_MACHINE_PTE_H_
 
-#include <machine/endian.h>
-
-/*
- * MIPS hardware page table entry
- */
-
 #ifndef _LOCORE
-struct pte {
-#if BYTE_ORDER == BIG_ENDIAN
-unsigned int	pg_prot:2,		/* SW: access control */
-		pg_pfnum:24,		/* HW: core page frame number or 0 */
-		pg_attr:3,		/* HW: cache attribute */
-		pg_m:1,			/* HW: modified (dirty) bit */
-		pg_v:1,			/* HW: valid bit */
-		pg_g:1;			/* HW: ignore pid bit */
-#endif
-#if BYTE_ORDER == LITTLE_ENDIAN
-unsigned int	pg_g:1,			/* HW: ignore pid bit */
-		pg_v:1,			/* HW: valid bit */
-		pg_m:1,			/* HW: modified (dirty) bit */
-		pg_attr:3,		/* HW: cache attribute */
-		pg_pfnum:24,		/* HW: core page frame number or 0 */
-		pg_prot:2;		/* SW: access control */
+	/* 32-bit PTE.  */
+typedef	uint32_t pt_entry_t;
+
+	/* Page directory entry.  */
+typedef	pt_entry_t *pd_entry_t;
 #endif
-};
 
 /*
- * Structure defining an tlb entry data set.
+ * Given a virtual address, get the offset of its PTE within its page
+ * directory page.
  */
+#define	PDE_OFFSET(va)	(((vm_offset_t)(va) >> PAGE_SHIFT) & (NPTEPG - 1))
 
-struct tlb {
-	int	tlb_mask;
-	int	tlb_hi;
-	int	tlb_lo0;
-	int	tlb_lo1;
-};
+/*
+ * TLB and PTE management.  Most things operate within the context of
+ * EntryLo0,1, and begin with TLBLO_.  Things which work with EntryHi
+ * start with TLBHI_.  PTE bits begin with PG_.
+ *
+ * Note that while the TLB uses 4K pages, our PTEs correspond to VM pages,
+ * which in turn are 8K.  This corresponds well to the fact that each TLB
+ * entry maps 2 TLB pages (one even, one odd.)
+ */
+#define	TLB_PAGE_SHIFT	(PAGE_SHIFT - 1)
+#define	TLB_PAGE_SIZE	(1 << TLB_PAGE_SHIFT)
+#define	TLB_PAGE_MASK	(TLB_PAGE_SIZE - 1)
 
-typedef unsigned int pt_entry_t;
-typedef pt_entry_t *pd_entry_t;
+/*
+ * TLB PageMask register.  Has mask bits set above the default, 4K, page mask.
+ */
+#define	TLBMASK_SHIFT	(TLB_PAGE_SHIFT + 1)
+#define	TLBMASK_MASK	((PAGE_MASK >> TLBMASK_SHIFT) << TLBMASK_SHIFT)
 
-#define	PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
-#define	PTESIZE		sizeof(pt_entry_t)	/* for assembly files */
+/*
+ * PFN for EntryLo register.  Upper bits are 0, which is to say that
+ * bit 29 is the last hardware bit;  Bits 30 and upwards (EntryLo is
+ * 64 bit though it can be referred to in 32-bits providing 2 software
+ * bits safely.  We use it as 64 bits to get many software bits, and
+ * god knows what else.) are unacknowledged by hardware.  They may be
+ * written as anything, but otherwise they have as much meaning as
+ * other 0 fields.
+ */
+#define	TLBLO_SWBITS_SHIFT	(30)
+#define	TLBLO_PFN_SHIFT		(6 + (PAGE_SHIFT - TLBMASK_SHIFT))
+#define	TLBLO_PFN_MASK		(0x03FFFFFC0)
+#define	TLBLO_PA_TO_PFN(pa)	((((pa) >> TLB_PAGE_SHIFT) << TLBLO_PFN_SHIFT) & TLBLO_PFN_MASK)
+#define	TLBLO_PFN_ODD		(TLBLO_PA_TO_PFN(TLB_PAGE_SIZE))
+#define	TLBLO_PFN_TO_PA(pfn)	(((pfn) >> TLBLO_PFN_SHIFT) << TLB_PAGE_SHIFT)
+#define	TLBLO_PTE_TO_PFN(pte)	((pte) & TLBLO_PFN_MASK)
+#define	TLBLO_PTE_TO_PA(pte)	(TLBLO_PFN_TO_PA(TLBLO_PTE_TO_PFN((pte))))
 
-#endif /* _LOCORE */
+/*
+ * VPN for EntryHi register.  Upper two bits select user, supervisor,
+ * or kernel.  Bits 61 to 40 copy bit 63.  VPN2 is bits 39 and down to
+ * as low as 13, down to PAGE_SHIFT, to index 2 TLB pages*.  From bit 12
+ * to bit 8 there is a 5-bit 0 field.  Low byte is ASID.
+ *
+ * Note that in FreeBSD, 2 TLB pages are equal to 1 VM page.
+ */
+#if defined(__mips_n64)
+#define	TLBHI_R_SHIFT		62
+#define	TLBHI_R_USER		(0x00UL << TLBHI_R_SHIFT)
+#define	TLBHI_R_SUPERVISOR	(0x01UL << TLBHI_R_SHIFT)
+#define	TLBHI_R_KERNEL		(0x03UL << TLBHI_R_SHIFT)
+#define	TLBHI_R_MASK		(0x03UL << TLBHI_R_SHIFT)
+#define	TLBHI_VA_R(va)		((va) & TLBHI_R_MASK)
+#define	TLBHI_FILL_SHIFT	48
+#define	TLBHI_FILL_MASK		((0x7FFFFUL) << TLBHI_FILL_SHIFT)
+#define	TLBHI_VA_FILL(va)	((((va) & (1UL << 63)) != 0 ? TLBHI_FILL_MASK : 0))
+#define	TLBHI_VPN2_SHIFT	(PAGE_SHIFT)
+#define	TLBHI_VPN2_MASK		(((~((1UL << TLBHI_VPN2_SHIFT) - 1)) << (63 - TLBHI_FILL_SHIFT)) >> (63 - TLBHI_FILL_SHIFT))
+#define	TLBHI_VA_TO_VPN2(va)	((va) & TLBHI_VPN2_MASK)
+#define	TLBHI_ENTRY(va, asid)	((TLBHI_VA_R((va))) /* Region. */ | \
+				 (TLBHI_VA_FILL((va))) /* Fill. */ | \
+				 (TLBHI_VA_TO_VPN2((va))) /* VPN2. */ | \
+				 ((asid)))
+#else
+#define	TLBHI_ENTRY(va, asid)	(((va) & ~PAGE_MASK) | (asid))
+#endif
 
-#define	PT_ENTRY_NULL	((pt_entry_t *) 0)
+/*
+ * TLB flags managed in hardware:
+ * 	C:	Cache attribute.
+ * 	D:	Dirty bit.  This means a page is writable.  It is not
+ * 		set at first, and a write is trapped, and the dirty
+ * 		bit is set.  See also PG_RO.
+ * 	V:	Valid bit.  Obvious, isn't it?
+ * 	G:	Global bit.  This means that this mapping is present
+ * 		in EVERY address space, and to ignore the ASID when
+ * 		it is matched.
+ */
+#define	PG_C(attr)	((attr & 0x07) << 3)
+#define	PG_C_UC		(PG_C(0x02))
+#define	PG_C_CNC	(PG_C(0x03))
+#define	PG_D		0x04
+#define	PG_V		0x02
+#define	PG_G		0x01
 
-#define	PTE_WIRED	0x80000000	/* SW */
-#define	PTE_W		PTE_WIRED
-#define	PTE_RO		0x40000000	/* SW */
+/*
+ * VM flags managed in software:
+ * 	RO:	Read only.  Never set PG_D on this page, and don't
+ * 		listen to requests to write to it.
+ * 	W:	Wired.  ???
+ */
+#define	PG_RO	(0x01 << TLBLO_SWBITS_SHIFT)
+#define	PG_W	(0x02 << TLBLO_SWBITS_SHIFT)
 
-#define	PTE_G		0x00000001	/* HW */
-#define	PTE_V		0x00000002
-/*#define	PTE_NV		0x00000000       Not Used */
-#define	PTE_M		0x00000004
-#define	PTE_RW		PTE_M
-#define PTE_ODDPG       0x00000040 
-/*#define	PG_ATTR		0x0000003f  Not Used */
-#define	PTE_UNCACHED	0x00000010
-#ifdef CPU_SB1
-#define	PTE_CACHE	0x00000028	/* cacheable coherent */
-#else
-#define	PTE_CACHE	0x00000018
-#endif
-/*#define	PG_CACHEMODE	0x00000038 Not Used*/
-#define	PTE_ROPAGE	(PTE_V | PTE_RO | PTE_CACHE) /* Write protected */
-#define	PTE_RWPAGE	(PTE_V | PTE_M | PTE_CACHE)  /* Not wr-prot not clean */
-#define	PTE_CWPAGE	(PTE_V | PTE_CACHE)	   /* Not wr-prot but clean */
-#define	PTE_IOPAGE	(PTE_G | PTE_V | PTE_M | PTE_UNCACHED)
-#define	PTE_FRAME	0x3fffffc0
-#define PTE_HVPN        0xffffe000      /* Hardware page no mask */
-#define PTE_ASID        0x000000ff      /* Address space ID */
+/*
+ * PTE management functions for bits defined above.
+ *
+ * XXX Can make these atomics, but some users of them are using PTEs in local
+ * registers and such and don't need the overhead.
+ */
+#define	pte_clear(pte, bit)	((*pte) &= ~(bit))
+#define	pte_set(pte, bit)	((*pte) |= (bit))
+#define	pte_test(pte, bit)	(((*pte) & (bit)) == (bit))
 
-#define	TLB_PAGE_SHIFT	(PAGE_SHIFT - 1)
-#define	PTE_SHIFT	6
-#define	pfn_is_ext(x)	((x) & 0x3c000000)
-#define	vad_to_pfn(x)	((((vm_offset_t)(x) >> TLB_PAGE_SHIFT) << PTE_SHIFT) & PTE_FRAME)
-#define	pfn_to_vad(x)	((((x) & PTE_FRAME) >> PTE_SHIFT) << TLB_PAGE_SHIFT)
-
-/* User virtual to pte offset in page table */
-#define	vad_to_pte_offset(adr)	(((adr) >> PAGE_SHIFT) & (NPTEPG -1))
-
-#define	mips_pg_v(entry)	((entry) & PTE_V)
-#define	mips_pg_wired(entry)	((entry) & PTE_WIRED)
-#define	mips_pg_m_bit()		(PTE_M)
-#define	mips_pg_rw_bit()	(PTE_M)
-#define	mips_pg_ro_bit()	(PTE_RO)
-#define	mips_pg_ropage_bit()	(PTE_ROPAGE)
-#define	mips_pg_rwpage_bit()	(PTE_RWPAGE)
-#define	mips_pg_cwpage_bit()	(PTE_CWPAGE)
-#define	mips_pg_global_bit()	(PTE_G)
-#define	mips_pg_wired_bit()	(PTE_WIRED)
-#define	mips_tlbpfn_to_paddr(x)	pfn_to_vad((x))
-#define	mips_paddr_to_tlbpfn(x)	vad_to_pfn((x))
-
-/* These are not used */
-#define	PTE_SIZE_4K	0x00000000
-#define	PTE_SIZE_16K	0x00006000
-#define	PTE_SIZE_64K	0x0001e000
-#define	PTE_SIZE_256K	0x0007e000
-#define	PTE_SIZE_1M	0x001fe000
-#define	PTE_SIZE_4M	0x007fe000
-#define	PTE_SIZE_16M	0x01ffe000
+	/* Internal API for the MIPS PMAP.  */
 
-#endif	/* !_MACHINE_PTE_H_ */
+#endif /* !_MACHINE_PTE_H_ */

Added: user/jmallett/octeon/sys/mips/include/tlb.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ user/jmallett/octeon/sys/mips/include/tlb.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2004-2010 Juli Mallett <jmallett@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_MACHINE_TLB_H_
+#define	_MACHINE_TLB_H_
+
+void tlb_invalidate_address(struct pmap *, vm_offset_t);
+void tlb_invalidate_all(void);
+void tlb_update(struct pmap *, vm_offset_t, pt_entry_t);
+
+#endif /* !_MACHINE_TLB_H_ */

Modified: user/jmallett/octeon/sys/mips/include/vm.h
==============================================================================
--- user/jmallett/octeon/sys/mips/include/vm.h	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/include/vm.h	Sat Apr  3 07:12:03 2010	(r206120)
@@ -32,8 +32,8 @@
 #include <machine/pte.h>
 
 /* Memory attributes. */
-#define	VM_MEMATTR_UNCACHED			((vm_memattr_t)PTE_UNCACHED)
-#define	VM_MEMATTR_CACHEABLE_NONCOHERENT	((vm_memattr_t)PTE_CACHE)
+#define	VM_MEMATTR_UNCACHED			((vm_memattr_t)PG_C_UC)
+#define	VM_MEMATTR_CACHEABLE_NONCOHERENT	((vm_memattr_t)PG_C_CNC)
 
 #define	VM_MEMATTR_DEFAULT		VM_MEMATTR_CACHEABLE_NONCOHERENT
 

Modified: user/jmallett/octeon/sys/mips/mips/cpu.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/cpu.c	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/mips/cpu.c	Sat Apr  3 07:12:03 2010	(r206120)
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/intr_machdep.h>
 #include <machine/locore.h>
 #include <machine/pte.h>
+#include <machine/tlb.h>
 #include <machine/hwfunc.h>
 
 struct mips_cpuinfo cpuinfo;
@@ -136,7 +137,7 @@ mips_cpu_init(void)
 	mips_get_identity(&cpuinfo);
 	num_tlbentries = cpuinfo.tlb_nentries;
 	mips_wr_wired(0);
-	Mips_TLBFlush(num_tlbentries);
+	tlb_invalidate_all();
 	mips_wr_wired(VMWIRED_ENTRIES);
 	mips_config_cache(&cpuinfo);
 	mips_vector_init();

Modified: user/jmallett/octeon/sys/mips/mips/exception.S
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/exception.S	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/mips/exception.S	Sat Apr  3 07:12:03 2010	(r206120)
@@ -69,7 +69,7 @@
 /*
  * Clear the software-managed bits in a PTE in register pr.
  */
-#define	CLEAR_PTE_WIRED(pr)							\
+#define	CLEAR_PTE_SWBITS(pr)							\
 	sll		pr, 2 ;							\
 	srl		pr, 2			# keep bottom 30 bits
 
@@ -148,9 +148,9 @@ MipsDoTLBMiss:
 	andi		k0, k0, 0xff8			#0c: k0=page tab offset
 	PTR_ADDU	k1, k1, k0			#0d: k1=pte address
 	lw		k0, 0(k1)			#0e: k0=lo0 pte
-	CLEAR_PTE_WIRED(k0)
+	CLEAR_PTE_SWBITS(k0)
 	MTC0		k0, COP_0_TLB_LO0		#12: lo0 is loaded
-	addu		k0, PTE_ODDPG
+	addu		k0, TLBLO_PFN_ODD
 	MTC0		k0, COP_0_TLB_LO1		#15: lo1 is loaded
 	HAZARD_DELAY
 	tlbwr						#1a: write to tlb
@@ -852,14 +852,14 @@ NLEAF(MipsTLBInvalidException)
 	lw		k0, 0(k1)			# k0=this PTE
 
 	/* Validate page table entry.  */
-	andi		k0, PTE_V
+	andi		k0, PG_V
 	beqz		k0, 3f
 	nop
 
 	lw		k0, 0(k1)
-	CLEAR_PTE_WIRED(k0)
+	CLEAR_PTE_SWBITS(k0)
 	MTC0		k0, COP_0_TLB_LO0
-	addu		k0, PTE_ODDPG
+	addu		k0, TLBLO_PFN_ODD
 	MTC0		k0, COP_0_TLB_LO1
 
 	tlbp
@@ -996,9 +996,9 @@ NLEAF(MipsTLBMissException)
 	andi		k0, k0, 0xff8			# k0=page tab offset
 	PTR_ADDU	k1, k1, k0			# k1=pte address
 	lw		k0, 0(k1)			# k0=lo0 pte
-	CLEAR_PTE_WIRED(k0)
+	CLEAR_PTE_SWBITS(k0)
 	MTC0		k0, COP_0_TLB_LO0		# lo0 is loaded
-	addu		k0, PTE_ODDPG
+	addu		k0, TLBLO_PFN_ODD
 	MTC0		k0, COP_0_TLB_LO1		# lo1 is loaded
 	HAZARD_DELAY
 	tlbwr					# write to tlb

Modified: user/jmallett/octeon/sys/mips/mips/machdep.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/machdep.c	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/mips/machdep.c	Sat Apr  3 07:12:03 2010	(r206120)
@@ -428,7 +428,7 @@ mips_pcpu_tlb_init(struct pcpu *pcpu)
 	 */
 	memset(&tlb, 0, sizeof(tlb));
 	pa = vtophys(pcpu);
-	lobits = PTE_RW | PTE_V | PTE_G | PTE_CACHE;
+	lobits = PG_D | PG_V | PG_G | PG_C_CNC;
 	tlb.tlb_hi = (vm_offset_t)pcpup;
 	tlb.tlb_lo0 = mips_paddr_to_tlbpfn(pa) | lobits;
 	tlb.tlb_lo1 = mips_paddr_to_tlbpfn(pa + PAGE_SIZE) | lobits;

Modified: user/jmallett/octeon/sys/mips/mips/mp_machdep.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/mp_machdep.c	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/mips/mp_machdep.c	Sat Apr  3 07:12:03 2010	(r206120)
@@ -128,7 +128,6 @@ mips_ipi_handler(void *arg)
 			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
 
 			savectx(&stoppcbs[cpu]);
-			pmap_save_tlb();
 
 			/* Indicate we are stopped */
 			atomic_set_int(&stopped_cpus, cpumask);
@@ -236,7 +235,7 @@ smp_init_secondary(u_int32_t cpuid)
 
 	/* TLB */
 	mips_wr_wired(0);
-	Mips_TLBFlush(num_tlbentries);
+	tlb_invalidate_all();
 	mips_wr_wired(VMWIRED_ENTRIES);
 
 	/*

Modified: user/jmallett/octeon/sys/mips/mips/pmap.c
==============================================================================
--- user/jmallett/octeon/sys/mips/mips/pmap.c	Sat Apr  3 03:31:14 2010	(r206119)
+++ user/jmallett/octeon/sys/mips/mips/pmap.c	Sat Apr  3 07:12:03 2010	(r206120)
@@ -98,6 +98,7 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/cache.h>
 #include <machine/md_var.h>
+#include <machine/tlb.h>
 
 #if defined(DIAGNOSTIC)
 #define	PMAP_DIAGNOSTIC
@@ -123,19 +124,9 @@ __FBSDID("$FreeBSD$");
 #define	pmap_pde(m, v)		(&((m)->pm_segtab[pmap_segshift((v))]))
 #define	segtab_pde(m, v)	((m)[pmap_segshift((v))])
 
-#define	pmap_pte_w(pte)		((*(int *)pte & PTE_W) != 0)
-#define	pmap_pde_v(pte)		((*(int *)pte) != 0)
-#define	pmap_pte_m(pte)		((*(int *)pte & PTE_M) != 0)
-#define	pmap_pte_v(pte)		((*(int *)pte & PTE_V) != 0)
-
-#define	pmap_pte_set_w(pte, v)	((v)?(*(int *)pte |= PTE_W):(*(int *)pte &= ~PTE_W))
-#define	pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
-
 #define	NUSERPGTBLS		(pmap_segshift(VM_MAXUSER_ADDRESS))
 #define	MIPS_SEGSIZE		(1L << SEGSHIFT)
 #define	mips_segtrunc(va)	((va) & ~(MIPS_SEGSIZE-1))
-#define	pmap_TLB_invalidate_all() mips_TBIAP(num_tlbentries)
-#define	pmap_va_asid(pmap, va)	((va) | ((pmap)->pm_asid[PCPU_GET(cpuid)].asid << VMTLB_PID_SHIFT))
 #define	is_kernel_pmap(x)	((x) == kernel_pmap)
 
 struct pmap kernel_pmap_store;
@@ -153,8 +144,6 @@ unsigned pmap_max_asid;		/* max ASID sup
 
 vm_offset_t kernel_vm_end;
 
-static struct tlb tlbstash[MAXCPU][MIPS_MAX_TLB_ENTRIES];
-
 static void pmap_asid_alloc(pmap_t pmap);
 
 /*
@@ -170,27 +159,27 @@ struct sysmaps sysmaps_pcpu[MAXCPU];
 
 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
-static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
+static __inline void pmap_clear_modified_bit(vm_page_t m);
 
 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
 static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
-static boolean_t pmap_testbit(vm_page_t m, int bit);
+static boolean_t pmap_check_modified_bit(vm_page_t m);
 static void 
 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
     vm_page_t m, boolean_t wired);
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
     vm_offset_t va, vm_page_t m);
+static __inline void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
 
 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
 
 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
 static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
-static void pmap_TLB_invalidate_kernel(vm_offset_t);
-static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
 static void pmap_init_fpage(void);
 
 #ifdef SMP
@@ -244,7 +233,7 @@ pmap_pte(pmap_t pmap, vm_offset_t va)
 	if (pmap) {
 		pdeaddr = pmap_segmap(pmap, va);
 		if (pdeaddr) {
-			return pdeaddr + vad_to_pte_offset(va);
+			return pdeaddr + PDE_OFFSET(va);
 		}
 	}
 	return ((pt_entry_t *)0);
@@ -401,8 +390,8 @@ again:
 	 */
 	if (memory_larger_than_512meg) {
 		for (i = 0; i < MAXCPU; i++) {
-			sysmap_lmem[i].CMAP1 = PTE_G;
-			sysmap_lmem[i].CMAP2 = PTE_G;
+			pte_set(&sysmap_lmem[i].CMAP1, PG_G);
+			pte_set(&sysmap_lmem[i].CMAP2, PG_G);
 			sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
 			virtual_avail += PAGE_SIZE;
 			sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
@@ -450,7 +439,7 @@ again:
 	 * in the tlb.
 	 */
 	for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
-		*pte = PTE_G;
+		*pte = PG_G;
 
 	/*
 	 * The segment table contains the KVA of the pages in the second
@@ -524,7 +513,7 @@ pmap_init(void)
 static int
 pmap_nw_modified(pt_entry_t pte)
 {
-	if ((pte & (PTE_M | PTE_RO)) == (PTE_M | PTE_RO))
+	if (pte_test(&pte, PG_RO) && pte_test(&pte, PG_D))
 		return (1);
 	else
 		return (0);
@@ -547,7 +536,11 @@ pmap_invalidate_all_action(void *arg)
 #endif
 
 	if (pmap->pm_active & PCPU_GET(cpumask)) {
-		pmap_TLB_invalidate_all();
+		/*
+		 * XXX/juli
+		 * Add something like TBIAP.
+		 */
+		tlb_invalidate_all();
 	} else
 		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
 }
@@ -578,7 +571,7 @@ pmap_invalidate_page_action(void *arg)
 #endif
 
 	if (is_kernel_pmap(pmap)) {
-		pmap_TLB_invalidate_kernel(va);
+		tlb_invalidate_address(pmap, va);
 		return;
 	}
 	if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
@@ -587,18 +580,7 @@ pmap_invalidate_page_action(void *arg)
 		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
 		return;
 	}
-	va = pmap_va_asid(pmap, (va & ~PAGE_MASK));
-	Mips_TLBFlushAddr(va);
-}
-
-static void
-pmap_TLB_invalidate_kernel(vm_offset_t va)
-{
-	u_int32_t pid;
-
-	pid = mips_rd_entryhi() & VMTLB_PID;
-	va = va | (pid << VMTLB_PID_SHIFT);
-	Mips_TLBFlushAddr(va);
+	tlb_invalidate_address(pmap, va);
 }
 
 struct pmap_update_page_arg {
@@ -629,7 +611,7 @@ pmap_update_page_action(void *arg)
 
 #endif
 	if (is_kernel_pmap(pmap)) {
-		pmap_TLB_update_kernel(va, pte);
+		tlb_update(pmap, va, pte);
 		return;
 	}
 	if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
@@ -638,19 +620,7 @@ pmap_update_page_action(void *arg)
 		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
 		return;
 	}
-	va = pmap_va_asid(pmap, va);
-	Mips_TLBUpdate(va, pte);
-}
-
-static void
-pmap_TLB_update_kernel(vm_offset_t va, pt_entry_t pte)
-{
-	u_int32_t pid;
-
-	pid = mips_rd_entryhi() & VMTLB_PID;
-	va = va | (pid << VMTLB_PID_SHIFT);
-
-	Mips_TLBUpdate(va, pte);
+	tlb_update(pmap, va, pte);
 }
 
 /*
@@ -668,7 +638,7 @@ pmap_extract(pmap_t pmap, vm_offset_t va
 	PMAP_LOCK(pmap);
 	pte = pmap_pte(pmap, va);
 	if (pte) {
-		retval = mips_tlbpfn_to_paddr(*pte) | (va & PAGE_MASK);
+		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
 	}
 	PMAP_UNLOCK(pmap);
 	return retval;
@@ -684,17 +654,17 @@ pmap_extract(pmap_t pmap, vm_offset_t va
 vm_page_t
 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
-	pt_entry_t pte;
+	pt_entry_t *pte;
 	vm_page_t m;
 
 	m = NULL;
 	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 
-	pte = *pmap_pte(pmap, va);
-	if (pte != 0 && pmap_pte_v(&pte) &&
-	    ((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
-		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
+	pte = pmap_pte(pmap, va);
+	if (pte_test(pte, PG_V) && (pte_test(pte, PG_D) ||
+	    (prot & VM_PROT_WRITE) == 0)) {
+		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(*pte));
 		vm_page_hold(m);
 	}
 	vm_page_unlock_queues();
@@ -712,23 +682,23 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
  /* PMAP_INLINE */ void
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
-	register pt_entry_t *pte;
-	pt_entry_t npte, opte;
+	pt_entry_t *pte;
+	pt_entry_t npte;
 
 #ifdef PMAP_DEBUG
 	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
 #endif
-	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;
+	npte = TLBLO_PA_TO_PFN(pa) | PG_D | PG_V | PG_G | PG_W;
 
 	if (is_cacheable_mem(pa))
-		npte |= PTE_CACHE;
+		npte |= PG_C_CNC;
 	else
-		npte |= PTE_UNCACHED;
+		npte |= PG_C_UC;
 
 	pte = pmap_pte(kernel_pmap, va);
-	opte = *pte;
 	*pte = npte;
 
+#if 0
 	/*
 	 * The original code did an update_page() here, but
 	 * we often do a lot of pmap_kenter() calls and then
@@ -736,6 +706,9 @@ pmap_kenter(vm_offset_t va, vm_paddr_t p
 	 * the TLB has overflown many times.
 	 */
 	pmap_invalidate_page(kernel_pmap, va);
+#else
+	pmap_update_page(kernel_pmap, va, npte);
+#endif
 }
 
 /*
@@ -744,7 +717,7 @@ pmap_kenter(vm_offset_t va, vm_paddr_t p
  /* PMAP_INLINE */ void
 pmap_kremove(vm_offset_t va)
 {
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 
 	/*
 	 * Write back all caches from the page being destroyed
@@ -752,7 +725,7 @@ pmap_kremove(vm_offset_t va)
 	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
 
 	pte = pmap_pte(kernel_pmap, va);
-	*pte = PTE_G;
+	*pte = PG_G;
 	pmap_invalidate_page(kernel_pmap, va);
 }
 
@@ -887,7 +860,7 @@ vm_offset_t
 pmap_map_fpage(vm_paddr_t pa, struct fpage *fp, boolean_t check_unmaped)
 {
 	vm_offset_t kva;
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 	pt_entry_t npte;
 
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
@@ -903,11 +876,11 @@ pmap_map_fpage(vm_paddr_t pa, struct fpa
 	fp->state = TRUE;
 	kva = fp->kva;
 
-	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
+	npte = TLBLO_PA_TO_PFN(pa) | PG_D | PG_V | PG_G | PG_W | PG_C_CNC;
 	pte = pmap_pte(kernel_pmap, kva);
 	*pte = npte;
 
-	pmap_TLB_update_kernel(kva, npte);
+	pmap_update_page(kernel_pmap, kva, npte);
 
 	return (kva);
 }
@@ -919,7 +892,7 @@ void
 pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp)
 {
 	vm_offset_t kva;
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	/*
@@ -931,8 +904,8 @@ pmap_unmap_fpage(vm_paddr_t pa, struct f
 	kva = fp->kva;
 
 	pte = pmap_pte(kernel_pmap, kva);
-	*pte = PTE_G;
-	pmap_TLB_invalidate_kernel(kva);
+	*pte = PG_G;
+	pmap_invalidate_page(kernel_pmap, kva);
 
 	fp->state = FALSE;
 
@@ -1364,7 +1337,7 @@ pmap_growkernel(vm_offset_t addr)
 		 * produce a global bit to store in the tlb.
 		 */
 		for (i = 0; i < NPTEPG; i++, pte++)
-			*pte = PTE_G;
+			*pte = PG_G;
 
 		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
 		    ~(PAGE_SIZE * NPTEPG - 1);
@@ -1444,12 +1417,12 @@ retry:
 			KASSERT(pte != NULL, ("pte"));
 			oldpte = loadandclear((u_int *)pte);
 			if (is_kernel_pmap(pmap))
-				*pte = PTE_G;
-			KASSERT((oldpte & PTE_W) == 0,
+				*pte = PG_G;
+			KASSERT(!pte_test(&oldpte, PG_W),
 			    ("wired pte for unwired page"));
 			if (m->md.pv_flags & PV_TABLE_REF)
 				vm_page_flag_set(m, PG_REFERENCED);
-			if (oldpte & PTE_M)
+			if (pte_test(&oldpte, PG_D))
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, va);
 			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
@@ -1591,17 +1564,17 @@ pmap_remove_pte(struct pmap *pmap, pt_en
 
 	oldpte = loadandclear((u_int *)ptq);
 	if (is_kernel_pmap(pmap))
-		*ptq = PTE_G;
+		*ptq = PG_G;
 
-	if (oldpte & PTE_W)
+	if (pte_test(&oldpte, PG_W))
 		pmap->pm_stats.wired_count -= 1;
 
 	pmap->pm_stats.resident_count -= 1;
-	pa = mips_tlbpfn_to_paddr(oldpte);
+	pa = TLBLO_PTE_TO_PA(oldpte);
 
 	if (page_is_managed(pa)) {
 		m = PHYS_TO_VM_PAGE(pa);
-		if (oldpte & PTE_M) {
+		if (pte_test(&oldpte, PG_D)) {
 #if defined(PMAP_DIAGNOSTIC)
 			if (pmap_nw_modified(oldpte)) {
 				printf(
@@ -1626,7 +1599,7 @@ pmap_remove_pte(struct pmap *pmap, pt_en
 static void
 pmap_remove_page(struct pmap *pmap, vm_offset_t va)
 {
-	register pt_entry_t *ptq;
+	pt_entry_t *ptq;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1635,7 +1608,7 @@ pmap_remove_page(struct pmap *pmap, vm_o
 	/*
 	 * if there is no pte for this address, just skip it!!!
 	 */
-	if (!ptq || !pmap_pte_v(ptq)) {
+	if (!ptq || !pte_test(ptq, PG_V)) {
 		return;
 	}
 
@@ -1711,8 +1684,8 @@ out:
 void
 pmap_remove_all(vm_page_t m)
 {
-	register pv_entry_t pv;
-	register pt_entry_t *pte, tpte;
+	pv_entry_t pv;
+	pt_entry_t *pte, tpte;
 
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
@@ -1737,15 +1710,15 @@ pmap_remove_all(vm_page_t m)
 
 		tpte = loadandclear((u_int *)pte);
 		if (is_kernel_pmap(pv->pv_pmap))
-			*pte = PTE_G;
+			*pte = PG_G;
 
-		if (tpte & PTE_W)
+		if (pte_test(&tpte, PG_W))
 			pv->pv_pmap->pm_stats.wired_count--;
 
 		/*
 		 * Update the vm_page_t clean and reference bits.
 		 */
-		if (tpte & PTE_M) {
+		if (pte_test(&tpte, PG_D)) {
 #if defined(PMAP_DIAGNOSTIC)
 			if (pmap_nw_modified(tpte)) {
 				printf(
@@ -1806,13 +1779,13 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 		 * If pte is invalid, skip this page
 		 */
 		pte = pmap_pte(pmap, sva);
-		if (!pmap_pte_v(pte)) {
+		if (!pte_test(pte, PG_V)) {
 			sva += PAGE_SIZE;
 			continue;
 		}
 retry:
 		obits = pbits = *pte;
-		pa = mips_tlbpfn_to_paddr(pbits);
+		pa = TLBLO_PTE_TO_PA(pbits);
 
 		if (page_is_managed(pa)) {
 			m = PHYS_TO_VM_PAGE(pa);
@@ -1820,12 +1793,13 @@ retry:
 				vm_page_flag_set(m, PG_REFERENCED);
 				m->md.pv_flags &= ~PV_TABLE_REF;
 			}
-			if (pbits & PTE_M) {
+			if (pte_test(&pbits, PG_D)) {
 				vm_page_dirty(m);
 				m->md.pv_flags &= ~PV_TABLE_MOD;
 			}
 		}
-		pbits = (pbits & ~PTE_M) | PTE_RO;
+		pte_clear(&pbits, PG_D);
+		pte_set(&pbits, PG_RO);
 
 		if (pbits != *pte) {
 			if (!atomic_cmpset_int((u_int *)pte, obits, pbits))
@@ -1855,7 +1829,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
     vm_prot_t prot, boolean_t wired)
 {
 	vm_offset_t pa, opa;
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 	pt_entry_t origpte, newpte;
 	vm_page_t mpte, om;
 	int rw = 0;
@@ -1893,21 +1867,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	pa = VM_PAGE_TO_PHYS(m);
 	om = NULL;
 	origpte = *pte;
-	opa = mips_tlbpfn_to_paddr(origpte);
+	opa = TLBLO_PTE_TO_PA(origpte);
 
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
 	 */
-	if ((origpte & PTE_V) && (opa == pa)) {
+	if (pte_test(&origpte, PG_V) && opa == pa) {
 		/*
 		 * Wiring change, just update stats. We don't worry about
 		 * wiring PT pages as they remain resident as long as there
 		 * are valid mappings in them. Hence, if a user page is
 		 * wired, the PT page will be also.
 		 */
-		if (wired && ((origpte & PTE_W) == 0))
+		if (wired && !pte_test(&origpte, PG_W))
 			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PTE_W))
+		else if (!wired && pte_test(&origpte, PG_W))
 			pmap->pm_stats.wired_count--;
 
 #if defined(PMAP_DIAGNOSTIC)
@@ -1938,7 +1912,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	 * handle validating new mapping.
 	 */
 	if (opa) {
-		if (origpte & PTE_W)
+		if (pte_test(&origpte, PG_W))
 			pmap->pm_stats.wired_count--;
 
 		if (page_is_managed(opa)) {
@@ -1981,35 +1955,34 @@ validate:
 	/*

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201004030712.o337C4Pk013459>