Skip site navigation (1)Skip section navigation (2)
Date:      Sat, 4 Apr 2009 00:22:44 +0000 (UTC)
From:      Nathan Whitehorn <nwhitehorn@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r190681 - in head: share/mk sys/conf sys/dev/powermac_nvram sys/dev/uart sys/powerpc/aim sys/powerpc/booke sys/powerpc/include sys/powerpc/ofw sys/powerpc/powermac sys/powerpc/powerpc
Message-ID:  <200904040022.n340Mifi039247@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: nwhitehorn
Date: Sat Apr  4 00:22:44 2009
New Revision: 190681
URL: http://svn.freebsd.org/changeset/base/190681

Log:
  Add support for 64-bit PowerPC CPUs operating in the 64-bit bridge mode
  provided, for example, on the PowerPC 970 (G5), as well as on related CPUs
  like the POWER3 and POWER4.
  
  This also adds support for various built-in hardware found on Apple G5
  hardware (e.g. the IBM CPC925 northbridge).
  
  Reviewed by:    grehan

Added:
  head/sys/powerpc/aim/mmu_oea64.c   (contents, props changed)
  head/sys/powerpc/powermac/cpcht.c   (contents, props changed)
  head/sys/powerpc/powermac/cpchtvar.h   (contents, props changed)
  head/sys/powerpc/powerpc/uio_machdep.c
     - copied unchanged from r190402, head/sys/powerpc/booke/uio_machdep.c
Deleted:
  head/sys/powerpc/aim/uio_machdep.c
  head/sys/powerpc/booke/uio_machdep.c
Modified:
  head/share/mk/bsd.cpu.mk
  head/sys/conf/files.powerpc
  head/sys/dev/powermac_nvram/powermac_nvram.c
  head/sys/dev/uart/uart_cpu_powerpc.c
  head/sys/powerpc/aim/machdep.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mp_cpudep.c
  head/sys/powerpc/aim/ofw_machdep.c
  head/sys/powerpc/aim/trap_subr.S
  head/sys/powerpc/aim/uma_machdep.c
  head/sys/powerpc/aim/vm_machdep.c
  head/sys/powerpc/booke/machdep.c
  head/sys/powerpc/include/hid.h
  head/sys/powerpc/include/md_var.h
  head/sys/powerpc/include/sf_buf.h
  head/sys/powerpc/include/spr.h
  head/sys/powerpc/include/vmparam.h
  head/sys/powerpc/ofw/ofw_syscons.c
  head/sys/powerpc/powerpc/bus_machdep.c
  head/sys/powerpc/powerpc/cpu.c
  head/sys/powerpc/powerpc/mem.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c

Modified: head/share/mk/bsd.cpu.mk
==============================================================================
--- head/share/mk/bsd.cpu.mk	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/share/mk/bsd.cpu.mk	Sat Apr  4 00:22:44 2009	(r190681)
@@ -119,7 +119,7 @@ _CPUCFLAGS = -mcpu=${CPUTYPE}
 MACHINE_CPU = booke
 _CPUCFLAGS = -Wa,-me500 -msoft-float
 .  else
-_CPUCFLAGS = -mcpu=${CPUTYPE}
+_CPUCFLAGS = -mcpu=${CPUTYPE} -mno-powerpc64
 .  endif
 . elif ${MACHINE_ARCH} == "mips"
 .  if ${CPUTYPE} == "mips32"

Modified: head/sys/conf/files.powerpc
==============================================================================
--- head/sys/conf/files.powerpc	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/sys/conf/files.powerpc	Sat Apr  4 00:22:44 2009	(r190681)
@@ -76,13 +76,13 @@ powerpc/aim/interrupt.c		optional	aim
 powerpc/aim/locore.S		optional	aim no-obj
 powerpc/aim/machdep.c		optional	aim
 powerpc/aim/mmu_oea.c		optional	aim
+powerpc/aim/mmu_oea64.c		optional	aim
 powerpc/aim/mp_cpudep.c		optional	aim smp
 powerpc/aim/nexus.c		optional	aim
 powerpc/aim/ofw_machdep.c	optional	aim
 powerpc/aim/ofwmagic.S		optional	aim
 powerpc/aim/swtch.S		optional	aim
 powerpc/aim/trap.c		optional	aim
-powerpc/aim/uio_machdep.c	optional	aim
 powerpc/aim/uma_machdep.c	optional	aim
 powerpc/aim/vm_machdep.c	optional	aim
 powerpc/booke/clock.c		optional	e500
@@ -93,7 +93,6 @@ powerpc/booke/machdep.c		optional	e500
 powerpc/booke/pmap.c		optional	e500
 powerpc/booke/swtch.S		optional	e500
 powerpc/booke/trap.c		optional	e500
-powerpc/booke/uio_machdep.c	optional	e500
 powerpc/booke/vm_machdep.c	optional	e500
 powerpc/fpu/fpu_add.c		optional	fpu_emu
 powerpc/fpu/fpu_compare.c	optional	fpu_emu
@@ -127,6 +126,7 @@ powerpc/powermac/uninorth.c	optional	pow
 powerpc/powermac/cuda.c		optional	powermac cuda
 powerpc/powermac/pmu.c		optional	powermac pmu 
 powerpc/powermac/macgpio.c	optional	powermac pci 
+powerpc/powermac/cpcht.c	optional	powermac pci
 powerpc/powerpc/altivec.c	optional	aim
 powerpc/powerpc/atomic.S	standard
 powerpc/powerpc/autoconf.c	standard
@@ -158,6 +158,7 @@ powerpc/powerpc/stack_machdep.c	optional
 powerpc/powerpc/suswintr.c	standard
 powerpc/powerpc/syncicache.c	standard
 powerpc/powerpc/sys_machdep.c	standard
+powerpc/powerpc/uio_machdep.c	standard
 powerpc/psim/iobus.c 		optional	psim
 powerpc/psim/ata_iobus.c	optional	ata psim
 powerpc/psim/openpic_iobus.c	optional	psim

Modified: head/sys/dev/powermac_nvram/powermac_nvram.c
==============================================================================
--- head/sys/dev/powermac_nvram/powermac_nvram.c	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/sys/dev/powermac_nvram/powermac_nvram.c	Sat Apr  4 00:22:44 2009	(r190681)
@@ -131,19 +131,25 @@ powermac_nvram_attach(device_t dev)
 {
 	struct powermac_nvram_softc *sc;
 	phandle_t node;
-	u_int32_t reg[2];
-	int gen0, gen1;
+	u_int32_t reg[3];
+	int gen0, gen1, i;
 
 	node = ofw_bus_get_node(dev);
 	sc = device_get_softc(dev);
 
-	if (OF_getprop(node, "reg", reg, sizeof(reg)) < 8)
+	if ((i = OF_getprop(node, "reg", reg, sizeof(reg))) < 8)
 		return ENXIO;
 
 	sc->sc_dev = dev;
 	sc->sc_node = node;
 
-	sc->sc_bank0 = (vm_offset_t)pmap_mapdev(reg[0], NVRAM_SIZE * 2);
+	/*
+	 * Find which word (cell) of reg holds the 32-bit physical address;
+	 * we should really read #address-cells from the parent node instead.
+	 */
+	i = (i/4) - 2;
+
+	sc->sc_bank0 = (vm_offset_t)pmap_mapdev(reg[i], NVRAM_SIZE * 2);
 	sc->sc_bank1 = sc->sc_bank0 + NVRAM_SIZE;
 
 	gen0 = powermac_nvram_check((void *)sc->sc_bank0);

Modified: head/sys/dev/uart/uart_cpu_powerpc.c
==============================================================================
--- head/sys/dev/uart/uart_cpu_powerpc.c	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/sys/dev/uart/uart_cpu_powerpc.c	Sat Apr  4 00:22:44 2009	(r190681)
@@ -31,6 +31,8 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
 
 #include <machine/bus.h>
 
@@ -53,8 +55,7 @@ bus_space_tag_t uart_bus_space_mem = &bs
 int
 uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2)
 {
-
-	return ((b1->bsh == b2->bsh) ? 1 : 0);
+	return ((pmap_kextract(b1->bsh) == pmap_kextract(b2->bsh)) ? 1 : 0);
 }
 
 #ifdef MPC85XX
@@ -116,7 +117,16 @@ uart_cpu_getdev(int devtype, struct uart
 		return (ENXIO);
 	if (OF_getprop(input, "name", buf, sizeof(buf)) == -1)
 		return (ENXIO);
-	if (strcmp(buf, "ch-a"))
+
+	if (strcmp(buf, "ch-a") == 0) {
+		class = &uart_z8530_class;
+		di->bas.regshft = 4;
+		di->bas.chan = 1;
+	} else if (strcmp(buf,"serial") == 0) {
+		class = &uart_ns8250_class;
+		di->bas.regshft = 0;
+		di->bas.chan = 0;
+	} else
 		return (ENXIO);
 
 	error = OF_decode_addr(input, 0, &di->bas.bst, &di->bas.bsh);
@@ -125,11 +135,13 @@ uart_cpu_getdev(int devtype, struct uart
 
 	di->ops = uart_getops(class);
 
-	di->bas.rclk = 230400;
-	di->bas.chan = 1;
-	di->bas.regshft = 4;
+	if (OF_getprop(input, "clock-frequency", &di->bas.rclk, 
+	    sizeof(di->bas.rclk)) == -1)
+		di->bas.rclk = 230400;
+	if (OF_getprop(input, "current-speed", &di->baudrate, 
+	    sizeof(di->baudrate)) == -1)
+		di->baudrate = 0;
 
-	di->baudrate = 0;
 	di->databits = 8;
 	di->stopbits = 1;
 	di->parity = UART_PARITY_NONE;

Modified: head/sys/powerpc/aim/machdep.c
==============================================================================
--- head/sys/powerpc/aim/machdep.c	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/sys/powerpc/aim/machdep.c	Sat Apr  4 00:22:44 2009	(r190681)
@@ -130,6 +130,8 @@ extern vm_offset_t ksym_start, ksym_end;
 
 int cold = 1;
 int cacheline_size = 32;
+int ppc64 = 0;
+int hw_direct_map = 1;
 
 struct pcpu __pcpu[MAXCPU];
 
@@ -230,10 +232,13 @@ cpu_startup(void *dummy)
 
 extern char	kernel_text[], _end[];
 
+extern void	*testppc64, *testppc64size;
+extern void	*restorebridge, *restorebridgesize;
+extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
 #ifdef SMP
 extern void	*rstcode, *rstsize;
 #endif
-extern void	*trapcode, *trapsize;
+extern void	*trapcode, *trapcode64, *trapsize;
 extern void	*alitrap, *alisize;
 extern void	*dsitrap, *dsisize;
 extern void	*decrint, *decrsize;
@@ -245,11 +250,16 @@ powerpc_init(u_int startkernel, u_int en
 {
 	struct		pcpu *pc;
 	vm_offset_t	end;
+	void		*generictrap;
+	size_t		trap_offset;
 	void		*kmdp;
         char		*env;
+	int		vers;
+	uint32_t	msr, scratch;
 
 	end = 0;
 	kmdp = NULL;
+	trap_offset = 0;
 
 	/*
 	 * Parse metadata if present and fetch parameters.  Must be done
@@ -315,6 +325,26 @@ powerpc_init(u_int startkernel, u_int en
 		printf("powerpc_init: no loader metadata.\n");
 	}
 
+	/*
+	 * Set cacheline_size based on the CPU model.
+	 */
+
+	vers = mfpvr() >> 16;
+	switch (vers) {
+		case IBM970:
+		case IBM970FX:
+		case IBM970MP:
+		case IBM970GX:
+			cacheline_size = 128;
+			break;
+		default:
+			cacheline_size = 32;
+	}
+
+	/*
+	 * Init KDB
+	 */
+
 	kdb_init();
 
 	/*
@@ -322,47 +352,110 @@ powerpc_init(u_int startkernel, u_int en
 	 *      Disable translation in case the vector area
 	 *      hasn't been mapped (G5)
 	 */
-	mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
+	msr = mfmsr();
+	mtmsr(msr & ~(PSL_IR | PSL_DR));
 	isync();
+
+	/*
+	 * Figure out whether we need to use the 64 bit PMAP. This works by
+	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
+	 * and setting ppc64 = 0 if that causes a trap.
+	 */
+
+	ppc64 = 1;
+
+	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
+	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);
+
+	__asm __volatile("\
+		mfmsr %0;	\
+		mtsprg2 %1;	\
+				\
+		mtmsrd %0;	\
+		mfsprg2 %1;"
+	    : "=r"(scratch), "=r"(ppc64));
+
+	/*
+	 * Now copy restorebridge into all the handlers, if necessary,
+	 * and set up the trap tables.
+	 */
+
+	if (ppc64) {
+		/* Patch the two instances of rfi -> rfid */
+		bcopy(&rfid_patch,&rfi_patch1,4);
+		bcopy(&rfid_patch,&rfi_patch2,4);
+
+		/*
+		 * Copy a code snippet to restore 32-bit bridge mode
+		 * to the top of every non-generic trap handler
+		 */
+
+		trap_offset += (size_t)&restorebridgesize;
+		bcopy(&restorebridge, (void *)EXC_RST, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); 
+		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); 
+
+		/*
+		 * Set the common trap entry point to the one that
+		 * knows to restore 32-bit operation on execution.
+		 */
+
+		generictrap = &trapcode64;
+	} else {
+		generictrap = &trapcode;
+	}
+
 #ifdef SMP
-	bcopy(&rstcode,  (void *)EXC_RST,  (size_t)&rstsize);
+	bcopy(&rstcode, (void *)(EXC_RST + trap_offset),  (size_t)&rstsize);
 #else
-	bcopy(&trapcode, (void *)EXC_RST,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_RST,  (size_t)&trapsize);
 #endif
-	bcopy(&trapcode, (void *)EXC_MCHK, (size_t)&trapsize);
-	bcopy(&dsitrap,  (void *)EXC_DSI,  (size_t)&dsisize);
-	bcopy(&trapcode, (void *)EXC_ISI,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_EXI,  (size_t)&trapsize);
-	bcopy(&alitrap,  (void *)EXC_ALI,  (size_t)&alisize);
-	bcopy(&trapcode, (void *)EXC_PGM,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_FPU,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_DECR, (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_SC,   (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_TRC,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_FPA,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_VEC,  (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_VECAST, (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_THRM, (size_t)&trapsize);
-	bcopy(&trapcode, (void *)EXC_BPT,  (size_t)&trapsize);
+
 #ifdef KDB
-	bcopy(&dblow,   (void *)EXC_MCHK, (size_t)&dbsize);
-	bcopy(&dblow,   (void *)EXC_PGM,  (size_t)&dbsize);
-	bcopy(&dblow,   (void *)EXC_TRC,  (size_t)&dbsize);
-	bcopy(&dblow,   (void *)EXC_BPT,  (size_t)&dbsize);
+	bcopy(&dblow,	(void *)(EXC_MCHK + trap_offset), (size_t)&dbsize);
+	bcopy(&dblow,   (void *)(EXC_PGM + trap_offset),  (size_t)&dbsize);
+	bcopy(&dblow,   (void *)(EXC_TRC + trap_offset),  (size_t)&dbsize);
+	bcopy(&dblow,   (void *)(EXC_BPT + trap_offset),  (size_t)&dbsize);
+#else
+	bcopy(generictrap, (void *)EXC_MCHK, (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_PGM,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_TRC,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_BPT,  (size_t)&trapsize);
 #endif
+	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsisize);
+	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&alisize);
+	bcopy(generictrap, (void *)EXC_ISI,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_EXI,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_FPU,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_DECR, (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_SC,   (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_FPA,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_VEC,  (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_VECAST, (size_t)&trapsize);
+	bcopy(generictrap, (void *)EXC_THRM, (size_t)&trapsize);
 	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
 
 	/*
-	 * Make sure translation has been enabled
+	 * Restore MSR
 	 */
-	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
+	mtmsr(msr);
 	isync();
 
 	/*
 	 * Initialise virtual memory.
 	 */
-	pmap_mmu_install(MMU_TYPE_OEA, 0);		/* XXX temporary */
+	if (ppc64)
+		pmap_mmu_install(MMU_TYPE_G5, 0);
+	else
+		pmap_mmu_install(MMU_TYPE_OEA, 0);
+
 	pmap_bootstrap(startkernel, endkernel);
+	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
+	isync();
 
 	/*
 	 * Initialize params/tunables that are derived from memsize

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c	Fri Apr  3 23:52:47 2009	(r190680)
+++ head/sys/powerpc/aim/mmu_oea.c	Sat Apr  4 00:22:44 2009	(r190681)
@@ -323,6 +323,7 @@ void moea_zero_page_area(mmu_t, vm_page_
 void moea_zero_page_idle(mmu_t, vm_page_t);
 void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
+void moea_cpu_bootstrap(mmu_t, int);
 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
 void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
@@ -364,6 +365,7 @@ static mmu_method_t moea_methods[] = {
 
 	/* Internal interfaces */
 	MMUMETHOD(mmu_bootstrap,       	moea_bootstrap),
+	MMUMETHOD(mmu_cpu_bootstrap,   	moea_cpu_bootstrap),
 	MMUMETHOD(mmu_mapdev,		moea_mapdev),
 	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
 	MMUMETHOD(mmu_kextract,		moea_kextract),
@@ -617,7 +619,7 @@ om_cmp(const void *a, const void *b)
 }
 
 void
-pmap_cpu_bootstrap(int ap)
+moea_cpu_bootstrap(mmu_t mmup, int ap)
 {
 	u_int sdr;
 	int i;
@@ -709,6 +711,9 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
 	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
 	isync();
 
+	/* set global direct map flag */
+	hw_direct_map = 1;
+
 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
 
@@ -895,7 +900,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t k
 	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
 	kernel_pmap->pm_active = ~0;
 
-	pmap_cpu_bootstrap(0);
+	moea_cpu_bootstrap(mmup,0);
 
 	pmap_bootstrapped++;
 

Added: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/powerpc/aim/mmu_oea64.c	Sat Apr  4 00:22:44 2009	(r190681)
@@ -0,0 +1,2443 @@
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
+ */
+/*-
+ * Copyright (C) 2001 Benno Rice.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this module is called upon to
+ * provide software-use-only maps which may or may not be stored in the
+ * same form as hardware maps.  These pseudo-maps are used to store
+ * intermediate results from copy operations to and from address spaces.
+ *
+ * Since the information managed by this module is also stored by the
+ * logical address mapping module, this module may throw away valid virtual
+ * to physical mappings at almost any time.  However, invalidations of
+ * mappings must be done as requested.
+ *
+ * In order to cope with hardware architectures which make virtual to
+ * physical map invalidates expensive, this module may delay invalidate
+ * reduced protection operations until such time as they are actually
+ * necessary.  This module is given full information as to which processors
+ * are currently using which maps, and to when physical maps must be made
+ * correct.
+ */
+
+#include "opt_kstack_pages.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/vmmeter.h>
+
+#include <sys/kdb.h>
+
+#include <dev/ofw/openfirm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/uma.h>
+
+#include <machine/cpu.h>
+#include <machine/powerpc.h>
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/psl.h>
+#include <machine/bat.h>
+#include <machine/pte.h>
+#include <machine/sr.h>
+#include <machine/trap.h>
+#include <machine/mmuvar.h>
+
+#include "mmu_if.h"
+
+#define	MOEA_DEBUG
+
+#define TODO	panic("%s: not implemented", __func__);
+
+static __inline u_int32_t
+cntlzw(volatile u_int32_t a) {
+	u_int32_t b;
+	__asm ("cntlzw %0, %1" : "=r"(b) : "r"(a));
+	return b;
+}
+
+static __inline uint64_t
+va_to_vsid(pmap_t pm, vm_offset_t va)
+{
+	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
+}
+
+#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
+#define	SYNC()		__asm __volatile("sync");
+#define	EIEIO()		__asm __volatile("eieio");
+
+/*
+ * The tlbie instruction must be executed in 64-bit mode
+ * so we have to twiddle MSR[SF] around every invocation.
+ * Just to add to the fun, exceptions must be off as well
+ * so that we can't trap in 64-bit mode. What a pain.
+ */
+
+static __inline void
+TLBIE(pmap_t pmap, vm_offset_t va) {
+	register_t msr;
+	register_t scratch;
+
+	uint64_t vpn;
+	register_t vpn_hi, vpn_lo;
+
+#if 1
+	/*
+	 * CPU documentation says that tlbie takes the VPN, not the
+	 * VA. I think the code below does this correctly. We will see.
+	 */
+
+	vpn = (uint64_t)(va & ADDR_PIDX);
+	if (pmap != NULL)
+		vpn |= (va_to_vsid(pmap,va) << 28);
+#else
+	vpn = va;
+#endif
+
+	vpn_hi = (uint32_t)(vpn >> 32);
+	vpn_lo = (uint32_t)vpn;
+
+	__asm __volatile("\
+	    mfmsr %0; \
+	    clrldi %1,%0,49; \
+	    insrdi %1,1,1,0; \
+	    mtmsrd %1; \
+	    ptesync; \
+	    \
+	    sld %1,%2,%4; \
+	    or %1,%1,%3; \
+	    tlbie %1; \
+	    \
+	    mtmsrd %0; \
+	    eieio; \
+	    tlbsync; \
+	    ptesync;" 
+	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32));
+}
+
+#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
+#define ENABLE_TRANS(msr)	mtmsr(msr); isync()
+
+#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
+#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
+#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
+
+#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
+#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
+#define	PVO_WIRED		0x010		/* PVO entry is wired */
+#define	PVO_MANAGED		0x020		/* PVO entry is managed */
+#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
+						   bootstrap */
+#define PVO_FAKE		0x100		/* fictitious phys page */
+#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
+#define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
+#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
+#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
+#define	PVO_PTEGIDX_CLR(pvo)	\
+	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
+#define	PVO_PTEGIDX_SET(pvo, i)	\
+	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
+
+#define	MOEA_PVO_CHECK(pvo)
+
+#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
+#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
+#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)
+
+struct ofw_map {
+	vm_offset_t	om_va;
+	vm_size_t	om_len;
+	vm_offset_t	om_pa_hi;
+	vm_offset_t	om_pa_lo;
+	u_int		om_mode;
+};
+
+/*
+ * Map of physical memory regions.
+ */
+static struct	mem_region *regions;
+static struct	mem_region *pregions;
+extern u_int	phys_avail_count;
+extern int	regions_sz, pregions_sz;
+extern int	ofw_real_mode;
+static struct	ofw_map translations[64];
+
+extern struct pmap ofw_pmap;
+
+extern void bs_remap_earlyboot(void);
+
+
+/*
+ * Lock for the pteg and pvo tables.
+ */
+struct mtx	moea64_table_mutex;
+
+/*
+ * PTEG data.
+ */
+static struct	lpteg *moea64_pteg_table;
+u_int		moea64_pteg_count;
+u_int		moea64_pteg_mask;
+
+/*
+ * PVO data.
+ */
+struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
+/* lists of unmanaged pages */
+struct	pvo_head moea64_pvo_kunmanaged =
+    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
+struct	pvo_head moea64_pvo_unmanaged =
+    LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);
+
+uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
+uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */
+
+vm_offset_t	pvo_allocator_start;
+vm_offset_t	pvo_allocator_end;
+
+#define	BPVO_POOL_SIZE	327680
+static struct	pvo_entry *moea64_bpvo_pool;
+static int	moea64_bpvo_pool_index = 0;
+
+#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
+static u_int	moea64_vsid_bitmap[NPMAPS / VSID_NBPW];
+
+static boolean_t moea64_initialized = FALSE;
+
+/*
+ * Statistics.
+ */
+u_int	moea64_pte_valid = 0;
+u_int	moea64_pte_overflow = 0;
+u_int	moea64_pvo_entries = 0;
+u_int	moea64_pvo_enter_calls = 0;
+u_int	moea64_pvo_remove_calls = 0;
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 
+    &moea64_pte_valid, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
+    &moea64_pte_overflow, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 
+    &moea64_pvo_entries, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
+    &moea64_pvo_enter_calls, 0, "");
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
+    &moea64_pvo_remove_calls, 0, "");
+
+vm_offset_t	moea64_scratchpage_va[2];
+struct	pvo_entry *moea64_scratchpage_pvo[2];
+struct	lpte 	*moea64_scratchpage_pte[2];
+struct	mtx	moea64_scratchpage_mtx;
+
+/*
+ * Allocate physical memory for use in moea64_bootstrap.
+ */
+static vm_offset_t	moea64_bootstrap_alloc(vm_size_t, u_int);
+
+/*
+ * PTE calls.
+ */
+static int		moea64_pte_insert(u_int, struct lpte *);
+
+/*
+ * PVO calls.
+ */
+static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
+		    vm_offset_t, vm_offset_t, uint64_t, int, int);
+static void	moea64_pvo_remove(struct pvo_entry *, int);
+static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
+static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);
+
+/*
+ * Utility routines.
+ */
+static void		moea64_bridge_bootstrap(mmu_t mmup, 
+			    vm_offset_t kernelstart, vm_offset_t kernelend);
+static void		moea64_bridge_cpu_bootstrap(mmu_t, int ap);
+static void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+			    vm_prot_t, boolean_t);
+static boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
+static u_int		moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
+static void		moea64_kremove(mmu_t, vm_offset_t);
+static void		moea64_syncicache(pmap_t pmap, vm_offset_t va, 
+			    vm_offset_t pa);
+static void		tlbia(void);
+
+/*
+ * Kernel MMU interface
+ */
+void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
+void moea64_clear_modify(mmu_t, vm_page_t);
+void moea64_clear_reference(mmu_t, vm_page_t);
+void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+    vm_prot_t);
+void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
+vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
+void moea64_init(mmu_t);
+boolean_t moea64_is_modified(mmu_t, vm_page_t);
+boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
+vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
+boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+int moea64_page_wired_mappings(mmu_t, vm_page_t);
+void moea64_pinit(mmu_t, pmap_t);
+void moea64_pinit0(mmu_t, pmap_t);
+void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
+void moea64_qremove(mmu_t, vm_offset_t, int);
+void moea64_release(mmu_t, pmap_t);
+void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+void moea64_remove_all(mmu_t, vm_page_t);
+void moea64_remove_write(mmu_t, vm_page_t);
+void moea64_zero_page(mmu_t, vm_page_t);
+void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
+void moea64_zero_page_idle(mmu_t, vm_page_t);
+void moea64_activate(mmu_t, struct thread *);
+void moea64_deactivate(mmu_t, struct thread *);
+void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
+vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
+void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
+boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+boolean_t moea64_page_executable(mmu_t, vm_page_t);
+
+/*
+ * kobj method table binding the machine-independent mmu_* interface to
+ * the moea64 implementations above.  Terminated by the { 0, 0 } sentinel.
+ */
+static mmu_method_t moea64_bridge_methods[] = {
+	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
+	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
+	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
+	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
+	MMUMETHOD(mmu_enter,		moea64_enter),
+	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
+	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
+	MMUMETHOD(mmu_extract,		moea64_extract),
+	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
+	MMUMETHOD(mmu_init,		moea64_init),
+	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
+	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
+	MMUMETHOD(mmu_map,     		moea64_map),
+	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
+	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
+	MMUMETHOD(mmu_pinit,		moea64_pinit),
+	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
+	MMUMETHOD(mmu_protect,		moea64_protect),
+	MMUMETHOD(mmu_qenter,		moea64_qenter),
+	MMUMETHOD(mmu_qremove,		moea64_qremove),
+	MMUMETHOD(mmu_release,		moea64_release),
+	MMUMETHOD(mmu_remove,		moea64_remove),
+	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
+	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
+	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
+	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
+	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
+	MMUMETHOD(mmu_activate,		moea64_activate),
+	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
+
+	/* Internal interfaces */
+	MMUMETHOD(mmu_bootstrap,       	moea64_bridge_bootstrap),
+	MMUMETHOD(mmu_cpu_bootstrap,   	moea64_bridge_cpu_bootstrap),
+	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
+	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
+	MMUMETHOD(mmu_kextract,		moea64_kextract),
+	MMUMETHOD(mmu_kenter,		moea64_kenter),
+	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
+	MMUMETHOD(mmu_page_executable,	moea64_page_executable),
+
+	{ 0, 0 }
+};
+
+/*
+ * Register this pmap implementation under the MMU_TYPE_G5 name so the
+ * platform code can select it at boot on 64-bit (bridge-mode) CPUs.
+ */
+static mmu_def_t oea64_bridge_mmu = {
+	MMU_TYPE_G5,
+	moea64_bridge_methods,
+	0
+};
+MMU_DEF(oea64_bridge_mmu);
+
+/*
+ * Compute the primary PTEG (page table entry group) index for a
+ * (VSID, virtual address) pair: XOR the VSID with the page-index
+ * field of the address, then mask to the size of the page table.
+ */
+static __inline u_int
+va_to_pteg(uint64_t vsid, vm_offset_t addr)
+{
+	u_int hash;
+
+	/* ADDR_PIDX/ADDR_PIDX_SHFT isolate the VA's page index bits. */
+	hash = vsid ^ (((uint64_t)addr & ADDR_PIDX) >>
+	    ADDR_PIDX_SHFT);
+	return (hash & moea64_pteg_mask);
+}
+
+/*
+ * Return the PVO list head for the page backing physical address 'pa',
+ * optionally handing back the vm_page_t through 'pg_p'.  Physical
+ * addresses with no associated vm_page share the global
+ * moea64_pvo_unmanaged list.
+ */
+static __inline struct pvo_head *
+pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
+{
+	struct	vm_page *pg;
+
+	pg = PHYS_TO_VM_PAGE(pa);
+
+	if (pg_p != NULL)
+		*pg_p = pg;
+
+	if (pg == NULL)
+		return (&moea64_pvo_unmanaged);
+
+	return (&pg->md.mdpg_pvoh);
+}
+
+/*
+ * PVO list head stored in a managed page's machine-dependent data.
+ */
+static __inline struct pvo_head *
+vm_page_to_pvoh(vm_page_t m)
+{
+
+	return (&m->md.mdpg_pvoh);
+}
+
+/*
+ * Clear attribute bits (e.g. REF/CHG) cached in the page's
+ * machine-dependent area.  Caller must hold the page queue mutex.
+ */
+static __inline void
+moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
+{
+
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	m->md.mdpg_attrs &= ~ptebit;
+}
+
+/*
+ * Fetch the attribute bits cached in the page's machine-dependent area.
+ */
+static __inline u_int64_t
+moea64_attr_fetch(vm_page_t m)
+{
+
+	return (m->md.mdpg_attrs);
+}
+
+/*
+ * OR attribute bits into the page's cached attributes.  Caller must
+ * hold the page queue mutex.
+ */
+static __inline void
+moea64_attr_save(vm_page_t m, u_int64_t ptebit)
+{
+
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	m->md.mdpg_attrs |= ptebit;
+}
+
+/*
+ * Compare two PTEs by their high (identification) words only; the low
+ * word holds RPN/protection/status bits that are ignored here.
+ * Returns 1 on match, 0 otherwise.
+ */
+static __inline int
+moea64_pte_compare(const struct lpte *pt, const struct lpte *pvo_pt)
+{
+	if (pt->pte_hi == pvo_pt->pte_hi)
+		return (1);
+
+	return (0);
+}
+
+/*
+ * Test whether an in-table PTE identifies the given (VSID, VA) mapping,
+ * ignoring the valid bit.  'which' carries the remaining identification
+ * bits (e.g. the secondary-hash flag) expected in pte_hi.
+ */
+static __inline int
+moea64_pte_match(struct lpte *pt, uint64_t vsid, vm_offset_t va, int which)
+{
+	return (pt->pte_hi & ~LPTE_VALID) ==
+	    ((vsid << LPTE_VSID_SHIFT) |
+	    ((uint64_t)(va >> ADDR_API_SHFT64) & LPTE_API) | which);
+}
+
+/*
+ * Fill in the software copy of a PTE from (VSID, VA) and the caller-
+ * supplied low-word bits (RPN, WIMG, protection).  The valid bit is
+ * deliberately left clear; it is set when the entry is written to the
+ * hardware table (see moea64_pte_set).  Requires the table lock.
+ */
+static __inline void
+moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 
+    uint64_t pte_lo)
+{
+	ASSERT_TABLE_LOCK();
+
+	/*
+	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
+	 * set when the real pte is set in memory.
+	 *
+	 * Note: Don't set the valid bit for correct operation of tlb update.
+	 */
+	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
+	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);
+
+	pt->pte_lo = pte_lo;
+}
+
+/*
+ * Propagate the hardware-maintained REF/CHG bits from the in-table PTE
+ * into the cached software copy.  Requires the table lock.
+ */
+static __inline void
+moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
+{
+
+	ASSERT_TABLE_LOCK();
+
+	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
+}
+
+/*
+ * Clear status bits (REF/CHG) in an in-table PTE and invalidate the
+ * corresponding TLB entry so the hardware re-fetches it.  Requires the
+ * table lock.
+ */
+static __inline void
+moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit)
+{
+	ASSERT_TABLE_LOCK();
+
+	/*
+	 * As shown in Section 7.6.3.2.3
+	 */
+	pt->pte_lo &= ~ptebit;
+	TLBIE(pmap,va);
+}
+
+/*
+ * Install a PTE into the hardware page table.  The low word is written
+ * first, an eieio orders the two stores, and only then is the high word
+ * (which carries the valid bit) written, followed by a sync -- the
+ * update sequence of PEM Section 7.6.3.1.  Also marks the software copy
+ * valid and bumps the valid-entry counter.  Requires the table lock.
+ */
+static __inline void
+moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
+{
+
+	ASSERT_TABLE_LOCK();
+	pvo_pt->pte_hi |= LPTE_VALID;
+
+	/*
+	 * Update the PTE as defined in section 7.6.3.1.
+	 * Note that the REF/CHG bits are from pvo_pt and thus should have
+	 * been saved so this routine can restore them (if desired).
+	 */
+	pt->pte_lo = pvo_pt->pte_lo;
+	EIEIO();
+	pt->pte_hi = pvo_pt->pte_hi;
+	SYNC();
+	moea64_pte_valid++;
+}
+
+static __inline void
+moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
+{
+	ASSERT_TABLE_LOCK();
+	pvo_pt->pte_hi &= ~LPTE_VALID;
+
+	/*
+	 * Force the reg & chg bits back into the PTEs.

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200904040022.n340Mifi039247>