From owner-svn-src-all@FreeBSD.ORG Wed May 14 01:16:07 2014
Message-Id: <201405140116.s4E1G6YI012767@svn.freebsd.org>
From: Ian Lepore <ian@svn.freebsd.org>
Date: Wed, 14 May 2014 01:16:06 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
    svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r265998 - in stable/10/sys: boot/powerpc/ps3 dev/adb
    dev/uart powerpc/aim powerpc/booke powerpc/include
X-SVN-Group: stable-10
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Author: ian
Date: Wed May 14 01:16:05 2014
New Revision: 265998
URL: http://svnweb.freebsd.org/changeset/base/265998

Log:
  MFC r257180, r257195, r257196, r257198, r257209, r257295

  Add some extra sanity checking and fixes to printf format specifiers.
  Try even harder to find a console before giving up.
  Make devices with registers into the KVA region work reliably.
  Turn on VM_KMEM_SIZE_SCALE on 32-bit as well as 64-bit PowerPC.
  Return NOKEY instead of 0 if there are no more key presses queued.
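
The NOKEY change is worth a short illustration: a code of 0 is something
akbd_read_char() can legitimately return, so a caller that polls with
wait == 0 needs a distinct "queue empty" marker. Below is a minimal sketch
of that calling pattern; it assumes only the NOKEY constant and the
kbdd_read_char() wrapper from <dev/kbd/kbdreg.h>, and poll_one_key() itself
is a hypothetical helper, not code from this commit.

    #include <sys/param.h>
    #include <sys/kbio.h>
    #include <dev/kbd/kbdreg.h>

    /*
     * Hypothetical, non-blocking poll of one keyboard.  Because the driver's
     * read_char method returns NOKEY (rather than 0) when its buffer is
     * empty, the caller can tell "nothing queued" apart from a real code of 0.
     */
    static int
    poll_one_key(keyboard_t *kbd)
    {
            int c;

            c = kbdd_read_char(kbd, 0);     /* 0 == don't wait */
            if (c == NOKEY)
                    return (-1);            /* queue is empty */
            return (c);                     /* got a key code */
    }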

Modified:
  stable/10/sys/boot/powerpc/ps3/start.S
  stable/10/sys/dev/adb/adb_kbd.c
  stable/10/sys/dev/uart/uart_cpu_fdt.c
  stable/10/sys/powerpc/aim/mmu_oea64.c
  stable/10/sys/powerpc/booke/pmap.c
  stable/10/sys/powerpc/include/vmparam.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/boot/powerpc/ps3/start.S
==============================================================================
--- stable/10/sys/boot/powerpc/ps3/start.S      Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/boot/powerpc/ps3/start.S      Wed May 14 01:16:05 2014  (r265998)
@@ -27,7 +27,7 @@
 
 #define LOCORE
 
-#include 
+#include 
 
 /*
  * KBoot and simulators will start this program from the _start symbol, with

Modified: stable/10/sys/dev/adb/adb_kbd.c
==============================================================================
--- stable/10/sys/dev/adb/adb_kbd.c     Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/dev/adb/adb_kbd.c     Wed May 14 01:16:05 2014  (r265998)
@@ -621,7 +621,7 @@ akbd_read_char(keyboard_t *kbd, int wait
 
         if (!sc->buffers) {
                 mtx_unlock(&sc->sc_mutex);
-                return (0);
+                return (NOKEY);
         }
 
         adb_code = sc->buffer[0];

Modified: stable/10/sys/dev/uart/uart_cpu_fdt.c
==============================================================================
--- stable/10/sys/dev/uart/uart_cpu_fdt.c       Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/dev/uart/uart_cpu_fdt.c       Wed May 14 01:16:05 2014  (r265998)
@@ -142,14 +142,19 @@ uart_cpu_getdev(int devtype, struct uart
         /*
          * Retrieve /chosen/std{in,out}.
          */
-        if ((chosen = OF_finddevice("/chosen")) == -1)
-                return (ENXIO);
-        for (name = propnames; *name != NULL; name++) {
-                if (phandle_chosen_propdev(chosen, *name, &node) == 0)
-                        break;
+        node = -1;
+        if ((chosen = OF_finddevice("/chosen")) != -1) {
+                for (name = propnames; *name != NULL; name++) {
+                        if (phandle_chosen_propdev(chosen, *name, &node) == 0)
+                                break;
+                }
         }
-        if (*name == NULL)
+        if (chosen == -1 || *name == NULL)
+                node = OF_finddevice("serial0");        /* Last ditch */
+
+        if (node == -1) /* Can't find anything */
                 return (ENXIO);
+
         /*
          * Retrieve serial attributes.
          */

Modified: stable/10/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea64.c       Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/powerpc/aim/mmu_oea64.c       Wed May 14 01:16:05 2014  (r265998)
@@ -501,15 +501,7 @@ moea64_add_ofw_mappings(mmu_t mmup, phan
         qsort(translations, sz, sizeof (*translations), om_cmp);
 
         for (i = 0; i < sz; i++) {
-                CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
-                    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
-                    translations[i].om_len);
-
-                if (translations[i].om_pa_lo % PAGE_SIZE)
-                        panic("OFW translation not page-aligned!");
-
                 pa_base = translations[i].om_pa_lo;
-
 #ifdef __powerpc64__
                 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
 #else
@@ -517,6 +509,14 @@ moea64_add_ofw_mappings(mmu_t mmup, phan
                         panic("OFW translations above 32-bit boundary!");
 #endif
 
+                if (pa_base % PAGE_SIZE)
+                        panic("OFW translation not page-aligned (phys)!");
+                if (translations[i].om_va % PAGE_SIZE)
+                        panic("OFW translation not page-aligned (virt)!");
+
+                CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
+                    pa_base, translations[i].om_va, translations[i].om_len);
+
                 /* Now enter the pages for this mapping */
 
                 DISABLE_TRANS(msr);
@@ -693,9 +693,9 @@ moea64_early_bootstrap(mmu_t mmup, vm_of
         hwphyssz = 0;
         TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
         for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
-                CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
-                    regions[i].mr_start + regions[i].mr_size,
-                    regions[i].mr_size);
+                CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
+                    regions[i].mr_start, regions[i].mr_start +
+                    regions[i].mr_size, regions[i].mr_size);
                 if (hwphyssz != 0 &&
                     (physsz + regions[i].mr_size) >= hwphyssz) {
                         if (physsz < hwphyssz) {

Modified: stable/10/sys/powerpc/booke/pmap.c
==============================================================================
--- stable/10/sys/powerpc/booke/pmap.c  Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/powerpc/booke/pmap.c  Wed May 14 01:16:05 2014  (r265998)
@@ -189,6 +189,7 @@ static tlb_entry_t tlb1[TLB1_ENTRIES];
 
 /* Next free entry in the TLB1 */
 static unsigned int tlb1_idx;
+static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;
 
 static tlbtid_t tid_alloc(struct pmap *);
 
@@ -2681,11 +2682,23 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_padd
 
         size = roundup(size, PAGE_SIZE);
 
+        /*
+         * We leave a hole for device direct mapping between the maximum user
+         * address (0x8000000) and the minimum KVA address (0xc0000000). If
+         * devices are in there, just map them 1:1. If not, map them to the
+         * device mapping area about VM_MAX_KERNEL_ADDRESS. These mapped
+         * addresses should be pulled from an allocator, but since we do not
+         * ever free TLB1 entries, it is safe just to increment a counter.
+         * Note that there isn't a lot of address space here (128 MB) and it
+         * is not at all difficult to imagine running out, since that is a 4:1
+         * compression from the 0xc0000000 - 0xf0000000 address space that gets
+         * mapped there.
+         */
         if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
             (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
                 va = pa;
         else
-                va = kva_alloc(size);
+                va = atomic_fetchadd_int(&tlb1_map_base, size);
         res = (void *)va;
 
         do {
@@ -3085,7 +3098,7 @@ tlb1_mapin_region(vm_offset_t va, vm_pad
         }
 
         mapped = (va - base);
-        debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
+        printf("mapped size 0x%08x (wasted space 0x%08x)\n",
             mapped, mapped - size);
         return (mapped);
 }
@@ -3148,7 +3161,6 @@ tlb1_init()
 vm_offset_t
 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
 {
-        static vm_offset_t early_io_map_base = VM_MAX_KERNEL_ADDRESS;
         vm_paddr_t pa_base;
         vm_offset_t va, sz;
         int i;
@@ -3165,14 +3177,14 @@ pmap_early_io_map(vm_paddr_t pa, vm_size
 
         pa_base = trunc_page(pa);
         size = roundup(size + (pa - pa_base), PAGE_SIZE);
-        va = early_io_map_base + (pa - pa_base);
+        va = tlb1_map_base + (pa - pa_base);
 
         do {
                 sz = 1 << (ilog2(size) & ~1);
-                tlb1_set_entry(early_io_map_base, pa_base, sz, _TLB_ENTRY_IO);
+                tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
                 size -= sz;
                 pa_base += sz;
-                early_io_map_base += sz;
+                tlb1_map_base += sz;
         } while (size > 0);
 
 #ifdef SMP

Modified: stable/10/sys/powerpc/include/vmparam.h
==============================================================================
--- stable/10/sys/powerpc/include/vmparam.h     Wed May 14 00:55:21 2014  (r265997)
+++ stable/10/sys/powerpc/include/vmparam.h     Wed May 14 01:16:05 2014  (r265998)
@@ -112,6 +112,7 @@
 
 #define VM_MIN_KERNEL_ADDRESS           KERNBASE
 #define VM_MAX_KERNEL_ADDRESS           0xf8000000
+#define VM_MAX_SAFE_KERNEL_ADDRESS      VM_MAX_KERNEL_ADDRESS
 
 #endif /* AIM/E500 */
 
@@ -175,14 +176,21 @@ struct pmap_physseg {
 #define VM_KMEM_SIZE            (12 * 1024 * 1024)
 #endif
 
-#ifdef __powerpc64__
+/*
+ * How many physical pages per KVA page allocated.
+ * min(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE), VM_KMEM_SIZE_MAX)
+ * is the total KVA space allocated for kmem_map.
+ */
 #ifndef VM_KMEM_SIZE_SCALE
-#define VM_KMEM_SIZE_SCALE (3)
+#define VM_KMEM_SIZE_SCALE      (3)
 #endif
 
+/*
+ * Ceiling on the amount of kmem_map KVA space: 40% of the entire KVA space.
+ */
 #ifndef VM_KMEM_SIZE_MAX
-#define VM_KMEM_SIZE_MAX        0x1c0000000 /* 7 GB */
-#endif
+#define VM_KMEM_SIZE_MAX        ((VM_MAX_SAFE_KERNEL_ADDRESS - \
+    VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
 #endif
 
 #define ZERO_REGION_SIZE        (64 * 1024)     /* 64KB */
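
As a sanity check on the new VM_KMEM_SIZE_MAX expression, the 32-bit numbers
from the diffs above can be plugged in by hand: VM_MAX_KERNEL_ADDRESS is
0xf8000000, and VM_MIN_KERNEL_ADDRESS is KERNBASE, which the booke comment
above places at 0xc0000000. The throwaway userland program below copies those
constants rather than pulling in the real vmparam.h, so treat the exact
figures as illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Constants copied by hand from the diff above; not the real headers. */
    #define VM_MIN_KERNEL_ADDRESS       0xc0000000UL    /* per the booke comment */
    #define VM_MAX_KERNEL_ADDRESS       0xf8000000UL
    #define VM_MAX_SAFE_KERNEL_ADDRESS  VM_MAX_KERNEL_ADDRESS

    /* Same expression as the new VM_KMEM_SIZE_MAX, evaluated in 64 bits. */
    #define VM_KMEM_SIZE_MAX    (((uint64_t)VM_MAX_SAFE_KERNEL_ADDRESS - \
        VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)

    int
    main(void)
    {
            /* 0xf8000000 - 0xc0000000 + 1 bytes of KVA, capped at 40%. */
            printf("kmem ceiling = %ju bytes (~%ju MB)\n",
                (uintmax_t)VM_KMEM_SIZE_MAX,
                (uintmax_t)(VM_KMEM_SIZE_MAX >> 20));
            return (0);
    }

With those values the ceiling works out to roughly 358 MB, i.e. 40% of the
896 MB KVA window, instead of the old hard-coded 7 GB cap that was only
defined on powerpc64.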