From: Nathan Whitehorn <nwhitehorn@FreeBSD.org>
Date: Tue, 28 Jul 2009 15:57:53 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject: svn commit: r195920 - projects/ppc64/sys/powerpc/aim64
Message-Id: <200907281557.n6SFvrtE037024@svn.freebsd.org>

Author: nwhitehorn
Date: Tue Jul 28 15:57:53 2009
New Revision: 195920
URL: http://svn.freebsd.org/changeset/base/195920

Log:
  Bring up the system a little more, with some initial SLB management. We now
  boot to the point after the PMAP layer is up and the MMU is enabled.
Modified:
  projects/ppc64/sys/powerpc/aim64/locore.S
  projects/ppc64/sys/powerpc/aim64/machdep.c
  projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
  projects/ppc64/sys/powerpc/aim64/trap_subr.S

Modified: projects/ppc64/sys/powerpc/aim64/locore.S
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/locore.S	Tue Jul 28 15:07:41 2009	(r195919)
+++ projects/ppc64/sys/powerpc/aim64/locore.S	Tue Jul 28 15:57:53 2009	(r195920)
@@ -169,8 +169,8 @@ ASENTRY(__start)
 	addi	4,4,end@l
 	mr	5,4
 
-	lis	3,kernel_text@ha
-	addi	3,3,kernel_text@l
+	lis	3,kernbase@ha
+	addi	3,3,kernbase@l
 
 	/* Restore the argument pointer and length */
 	mr	6,20

Modified: projects/ppc64/sys/powerpc/aim64/machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/machdep.c	Tue Jul 28 15:07:41 2009	(r195919)
+++ projects/ppc64/sys/powerpc/aim64/machdep.c	Tue Jul 28 15:57:53 2009	(r195920)
@@ -104,7 +104,6 @@ __FBSDID("$FreeBSD$");
 #include
 #include
-#include
 #include
 #include
 #include
@@ -230,8 +229,6 @@ cpu_startup(void *dummy)
 
 extern char kernel_text[], _end[];
 
-extern void *restorebridge, *restorebridgesize;
-extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
 #ifdef SMP
 extern void *rstcode, *rstsize;
 #endif
@@ -398,8 +395,10 @@ powerpc_init(u_int startkernel, u_int en
 	bcopy(&trapcode, (void *)EXC_BPT,  (size_t)&trapsize);
 #endif
 	bcopy(&dsitrap,  (void *)(EXC_DSI),  (size_t)&dsisize);
+	bcopy(&trapcode, (void *)EXC_DSE,  (size_t)&trapsize);
 	bcopy(&alitrap,  (void *)(EXC_ALI),  (size_t)&alisize);
 	bcopy(&trapcode, (void *)EXC_ISI,  (size_t)&trapsize);
+	bcopy(&trapcode, (void *)EXC_ISE,  (size_t)&trapsize);
 	bcopy(&trapcode, (void *)EXC_EXI,  (size_t)&trapsize);
 	bcopy(&trapcode, (void *)EXC_FPU,  (size_t)&trapsize);
 	bcopy(&trapcode, (void *)EXC_DECR, (size_t)&trapsize);

Modified: projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Tue Jul 28 15:07:41 2009	(r195919)
+++ projects/ppc64/sys/powerpc/aim64/mmu_oea64.c	Tue Jul 28 15:57:53 2009	(r195920)
@@ -170,7 +170,12 @@ cntlzd(volatile register_t a) {
 static __inline uint64_t
 va_to_vsid(pmap_t pm, vm_offset_t va)
 {
+	#ifdef __powerpc64__
+	return (((uint64_t)pm->pm_context << 36) |
+	    ((uintptr_t)va >> ADDR_SR_SHFT));
+	#else
 	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
+	#endif
 }
 
 #define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
@@ -186,11 +191,13 @@ va_to_vsid(pmap_t pm, vm_offset_t va)
 
 static __inline void
 TLBIE(pmap_t pmap, vm_offset_t va) {
+#ifndef __powerpc64__
 	register_t msr;
 	register_t scratch;
+	register_t vpn_hi, vpn_lo;
+#endif
 
 	uint64_t vpn;
-	register_t vpn_hi, vpn_lo;
 
 #if 1
 	/*
@@ -205,6 +212,15 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 	vpn = va;
 #endif
 
+#ifdef __powerpc64__
+	__asm __volatile("\
+	    ptesync; \
+	    tlbie %0; \
+	    eieio; \
+	    tlbsync; \
+	    ptesync;"
+	:: "r"(vpn));
+#else
 	vpn_hi = (uint32_t)(vpn >> 32);
 	vpn_lo = (uint32_t)vpn;
 
@@ -224,13 +240,13 @@ TLBIE(pmap_t pmap, vm_offset_t va) {
 	    tlbsync; \
 	    ptesync;"
 	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32));
+#endif
 }
 
 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
 #define ENABLE_TRANS(msr)	mtmsr(msr); isync()
 
 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
-#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
 
 #define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
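In the new __powerpc64__ branch of va_to_vsid() above, the VSID is no longer looked up in a segment register; it is computed directly from the pmap's context number and the effective segment ID of the address. The following stand-alone sketch shows just that calculation. Only the two shifts come from the hunk above; the structure, the names, the ADDR_SR_SHFT value of 28 (256 MB segments) and main() are assumptions added for illustration.

#include <stdint.h>
#include <stdio.h>

#define ADDR_SR_SHFT	28	/* assumed: 256 MB segments, as in the 32-bit OEA code */

struct toy_pmap {
	uint64_t pm_context;	/* per-pmap context number (hypothetical stand-in) */
};

static uint64_t
toy_va_to_vsid(const struct toy_pmap *pm, uint64_t va)
{
	/* VSID = (context << 36) | ESID, mirroring the __powerpc64__ path above */
	return ((pm->pm_context << 36) | (va >> ADDR_SR_SHFT));
}

int
main(void)
{
	struct toy_pmap pm = { .pm_context = 1 };

	printf("vsid = 0x%jx\n", (uintmax_t)toy_va_to_vsid(&pm, 0xdeadbeefULL));
	return (0);
}

The TLBIE() change in the same hunk follows the same idea: on a 64-bit kernel the VPN fits in a single register, so the vpn_hi/vpn_lo split and its scratch MSR handling are only compiled for the 32-bit bridge case.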
@@ -270,7 +286,7 @@ static struct mem_region *regions;
 static struct mem_region *pregions;
 static u_int	phys_avail_count;
 static int	regions_sz, pregions_sz;
-static int	ofw_real_mode;
+extern int	ofw_real_mode;
 static struct	ofw_map translations[64];
 
 extern struct pmap ofw_pmap;
@@ -372,6 +388,7 @@ static void moea64_kremove(mmu_t, vm_of
 static void	moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa);
 static void	tlbia(void);
+static void	slbia(void);
 
 /*
  * Kernel MMU interface
 */
@@ -701,14 +718,45 @@ moea64_bridge_cpu_bootstrap(mmu_t mmup,
 {
 	int i = 0;
 
+	#ifdef __powerpc64__
+	register_t slb1, slb2;
+	#endif
+
 	/*
 	 * Initialize segment registers and MMU
 	 */
 
 	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();
-	for (i = 0; i < 16; i++) {
-		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
-	}
+
+	/*
+	 * Install kernel SLB entries
+	 */
+
+	#ifdef __powerpc64__
+	slbia();
+
+	for (i = 0; i < NSEGS; i++) {
+		if (!kernel_pmap->pm_sr[i])
+			continue;
+
+		/* The right-most bit is a validity bit */
+		slb1 = ((register_t)kernel_pmap->pm_context << 36) |
+		    (kernel_pmap->pm_sr[i] >> 1);
+		slb1 <<= 12;
+		slb2 = kernel_pmap->pm_sr[i] << 27 | i;
+
+		__asm __volatile ("slbmte %0, %1" :: "r"(slb1),
+		    "r"(slb2));
+	}
+	#else
+	for (i = 0; i < NSEGS; i++)
+		mtsrin(i << ADDR_SR_SHFT, pmap->pm_sr[i]);
+	#endif
+
+	/*
+	 * Install page table
+	 */
+
 	__asm __volatile ("sync; mtsdr1 %0; isync"
 	    :: "r"((uintptr_t)moea64_pteg_table | (64 - cntlzd(moea64_pteg_mask >> 11))));
@@ -844,8 +892,14 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
+	kernel_pmap->pm_context = 0;
+	#ifdef __powerpc64__
+	for (i = 0; i < 16; i++)
+		kernel_pmap->pm_sr[i] = (i << 1) | 1;
+	#else
 	for (i = 0; i < 16; i++)
 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
+	#endif
 	kernel_pmap->pmap_phys = kernel_pmap;
 	kernel_pmap->pm_active = ~0;
@@ -883,6 +937,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 		moea64_kenter(mmup, pa, pa);
 	ENABLE_TRANS(msr);
+
 	if (!ofw_real_mode) {
 	/*
 	 * Set up the Open Firmware pmap and add its mappings.
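To make the new SLB setup in moea64_bridge_cpu_bootstrap() easier to follow, here is a user-space sketch that assembles the same two slbmte operands from a kernel pmap initialized the way the hunks above do it (pm_context = 0, pm_sr[i] = (i << 1) | 1, with the right-most bit acting as the validity flag). The shifts are taken from the diff; NSEGS = 16, the variable names and the printing are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define NSEGS	16	/* assumed: one SLB slot per 256 MB kernel segment */

int
main(void)
{
	uint64_t pm_context = 0;	/* the kernel pmap uses context 0 */
	uint64_t pm_sr[NSEGS];
	uint64_t slbv, slbe;		/* the two slbmte operands (slb1/slb2 above) */
	int i;

	/* Kernel pmap setup from the diff: ESID in the upper bits of pm_sr[i],
	 * right-most bit used as a validity flag. */
	for (i = 0; i < NSEGS; i++)
		pm_sr[i] = ((uint64_t)i << 1) | 1;

	for (i = 0; i < NSEGS; i++) {
		if (!pm_sr[i])
			continue;

		/* VSID word: context and ESID, shifted into the VSID field */
		slbv = ((pm_context << 36) | (pm_sr[i] >> 1)) << 12;
		/* ESID word: ESID, validity bit, and the SLB slot index */
		slbe = (pm_sr[i] << 27) | (uint64_t)i;

		printf("slot %2d: slbv=0x%016jx slbe=0x%016jx\n",
		    i, (uintmax_t)slbv, (uintmax_t)slbe);

		/* In the kernel this pair is loaded with:
		 *	__asm __volatile("slbmte %0, %1" :: "r"(slbv), "r"(slbe));
		 */
	}
	return (0);
}

Splitting the encoding this way keeps the ESID, validity bit and slot index in the second operand and the context-derived VSID in the first, which matches the operand order slbmte expects.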
@@ -895,6 +950,7 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
 		if ((chosen = OF_finddevice("/chosen")) == -1)
 			panic("moea64_bootstrap: can't find /chosen");
 		OF_getprop(chosen, "mmu", &mmui, 4);
+
 		if ((mmu = OF_instance_to_package(mmui)) == -1)
 			panic("moea64_bootstrap: can't get mmu package");
 		if ((sz = OF_getproplen(mmu, "translations")) == -1)
@@ -1748,9 +1804,15 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
 			hash |= i;
 		}
 		moea64_vsid_bitmap[n] |= mask;
-		for (i = 0; i < 16; i++) {
-			pmap->pm_sr[i] = VSID_MAKE(i, hash);
-		}
+
+		#ifdef __powerpc64__
+		pmap->pm_context = hash;
+		for (i = 0; i < NSEGS; i++)
+			pmap->pm_sr[i] = 0;
+		#else
+		for (i = 0; i < 16; i++)
+			pmap->pm_sr[i] = VSID_MAKE(i, hash);
+		#endif
 		return;
 	}
@@ -1976,6 +2038,12 @@ tlbia(void)
 		TLBIE(NULL,i);
 }
 
+static void
+slbia(void)
+{
+	__asm __volatile ("slbia");
+}
+
 static int
 moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
     vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags, int recurse)

Modified: projects/ppc64/sys/powerpc/aim64/trap_subr.S
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/trap_subr.S	Tue Jul 28 15:07:41 2009	(r195919)
+++ projects/ppc64/sys/powerpc/aim64/trap_subr.S	Tue Jul 28 15:57:53 2009	(r195920)
@@ -42,22 +42,9 @@
 /*
  * Save/restore segment registers
  */
-#define	RESTORE_SRS(pmap,sr)	mtsr	0,sr; \
-	lwz	sr,1*4(pmap);	mtsr	1,sr; \
-	lwz	sr,2*4(pmap);	mtsr	2,sr; \
-	lwz	sr,3*4(pmap);	mtsr	3,sr; \
-	lwz	sr,4*4(pmap);	mtsr	4,sr; \
-	lwz	sr,5*4(pmap);	mtsr	5,sr; \
-	lwz	sr,6*4(pmap);	mtsr	6,sr; \
-	lwz	sr,7*4(pmap);	mtsr	7,sr; \
-	lwz	sr,8*4(pmap);	mtsr	8,sr; \
-	lwz	sr,9*4(pmap);	mtsr	9,sr; \
-	lwz	sr,10*4(pmap);	mtsr	10,sr; \
-	lwz	sr,11*4(pmap);	mtsr	11,sr; \
-	lwz	sr,12*4(pmap);	mtsr	12,sr; \
-	lwz	sr,13*4(pmap);	mtsr	13,sr; \
-	lwz	sr,14*4(pmap);	mtsr	14,sr; \
-	lwz	sr,15*4(pmap);	mtsr	15,sr; isync;
+
+#define	RESTORE_SRS(pmap, sr) \
+	slbia;
 
 /*
  * User SRs are loaded through a pointer to the current pmap.
@@ -249,10 +236,13 @@ CNAME(rstcode):
 	addi	%r1,%r1,(124-16)@l
 
 	lis	%r3,1@l
-	bla	CNAME(pmap_cpu_bootstrap)
-	bla	CNAME(cpudep_ap_bootstrap)
+	bla	CNAME(.pmap_cpu_bootstrap)
+	nop
+	bla	CNAME(.cpudep_ap_bootstrap)
+	nop
 	mr	%r1,%r3
-	bla	CNAME(machdep_ap_bootstrap)
+	bla	CNAME(.machdep_ap_bootstrap)
+	nop
 
 	/* Should not be reached */
 9:
@@ -441,10 +431,12 @@ k_trap:
 /* Call C interrupt dispatcher: */
 trapagain:
 	addi	%r3,%r1,8
-	bl	CNAME(powerpc_interrupt)
-	.globl	CNAME(trapexit)	/* backtrace code sentinel */
-CNAME(trapexit):
+	bl	CNAME(.powerpc_interrupt)
+	nop
+	bl	CNAME(.trapexit)
+	nop
+ASENTRY(trapexit)	/* backtrace code sentinel */
 /* Disable interrupts: */
 	mfmsr	%r3
 	andi.	%r3,%r3,~PSL_EE@l
@@ -511,7 +503,8 @@ dbtrap:
 	FRAME_SETUP(PC_DBSAVE)
 /* Call C trap code: */
 	addi	%r3,%r1,8
-	bl	CNAME(db_trap_glue)
+	bl	CNAME(.db_trap_glue)
+	nop
 	or.	%r3,%r3,%r3
 	bne	dbleave
 /* This wasn't for KDB, so switch to real trap: */
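Two themes run through the trap_subr.S hunks. First, the calls into C now target the dot symbols (.pmap_cpu_bootstrap, .powerpc_interrupt, and so on) and are followed by a nop; in the 64-bit ELF ABI the dot symbol is the code entry point of the function descriptor, and the nop leaves room for the linker to patch in a TOC restore after a cross-module call. Second, RESTORE_SRS no longer reloads sixteen segment registers, since 64-bit AIM has no segment registers; it simply issues slbia. The sketch below is a compilable, user-space rendering of that second change. The stub functions, the dummy VSID values, and main() are hypothetical; only the loop over sixteen segments and the single slbia replacement reflect the diff.

#include <stdint.h>
#include <stdio.h>

#define ADDR_SR_SHFT	28	/* assumed: 256 MB segments */

/* Stand-ins for the privileged mtsr/slbia instructions, so the sketch
 * can be compiled and run anywhere. */
static void
stub_mtsrin(uint32_t va, uint32_t sr_val)
{
	printf("mtsr %2u, 0x%08x\n", va >> ADDR_SR_SHFT, sr_val);
}

static void
stub_slbia(void)
{
	printf("slbia\n");
}

/* Old 32-bit behaviour: reload all sixteen segment registers from the pmap. */
static void
restore_srs_oea(const uint32_t pm_sr[16])
{
	int i;

	for (i = 0; i < 16; i++)
		stub_mtsrin((uint32_t)i << ADDR_SR_SHFT, pm_sr[i]);
}

/* New 64-bit behaviour: a single SLB invalidation. */
static void
restore_srs_oea64(void)
{
	stub_slbia();
}

int
main(void)
{
	uint32_t pm_sr[16];
	int i;

	for (i = 0; i < 16; i++)
		pm_sr[i] = 0x00100000u + (uint32_t)i;	/* dummy VSIDs */

	restore_srs_oea(pm_sr);
	restore_srs_oea64();
	return (0);
}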