From owner-svn-src-all@FreeBSD.ORG  Wed Jul  2 23:57:56 2014
From: Marcel Moolenaar <marcel@svn.freebsd.org>
Date: Wed, 2 Jul 2014 23:57:55 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org,
	svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r268201 - in stable/10/sys/ia64: ia64 include
Message-Id: <201407022357.s62NvtPH078037@svn.freebsd.org>

Author: marcel
Date: Wed Jul  2 23:57:55 2014
New Revision: 268201
URL: http://svnweb.freebsd.org/changeset/base/268201

Log:
  MFC r263380 & r268185:
  Add KTR events for the PMAP interface functions.
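For readers new to ktr(4): the CTRn() macros added below record a
timestamped event in the kernel's circular trace buffer whenever the
named event class (here KTR_PMAP) is compiled in and enabled. A minimal
sketch of how these events could be captured -- the option spellings
follow ktr(4) and NOTES; the entry count and mask choices are only
illustrative:

    # Kernel configuration: build in KTR and trace only the PMAP class.
    options 	KTR
    options 	KTR_ENTRIES=16384
    options 	KTR_COMPILE=(KTR_PMAP)
    options 	KTR_MASK=(KTR_PMAP)

    # At runtime, stop tracing before inspecting the buffer ...
    sysctl debug.ktr.mask=0
    # ... then dump the buffer with CPU numbers and timestamps.
    ktrdump -c -t

A kernel built without "options KTR" compiles these CTRn() calls away
entirely, so the instrumentation below is free in the default case.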
Modified:
  stable/10/sys/ia64/ia64/machdep.c
  stable/10/sys/ia64/ia64/pmap.c
  stable/10/sys/ia64/include/pmap.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/ia64/ia64/machdep.c
==============================================================================
--- stable/10/sys/ia64/ia64/machdep.c	Wed Jul  2 23:47:43 2014	(r268200)
+++ stable/10/sys/ia64/ia64/machdep.c	Wed Jul  2 23:57:55 2014	(r268201)
@@ -133,6 +133,7 @@ SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTL
     "ITC frequency");
 
 int cold = 1;
+int unmapped_buf_allowed = 0;
 
 struct bootinfo *bootinfo;
 
@@ -746,8 +747,8 @@ ia64_init(void)
 		mdlen = md->md_pages * EFI_PAGE_SIZE;
 		switch (md->md_type) {
 		case EFI_MD_TYPE_IOPORT:
-			ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys,
-			    mdlen);
+			ia64_port_base = pmap_mapdev_priv(md->md_phys,
+			    mdlen, VM_MEMATTR_UNCACHEABLE);
 			break;
 		case EFI_MD_TYPE_PALCODE:
 			ia64_pal_base = md->md_phys;

Modified: stable/10/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/10/sys/ia64/ia64/pmap.c	Wed Jul  2 23:47:43 2014	(r268200)
+++ stable/10/sys/ia64/ia64/pmap.c	Wed Jul  2 23:57:55 2014	(r268201)
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
 
 #include
 #include
+#include <sys/ktr.h>
 #include
 #include
 #include
@@ -484,6 +485,8 @@ void
 pmap_page_init(vm_page_t m)
 {
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	TAILQ_INIT(&m->md.pv_list);
 	m->md.memattr = VM_MEMATTR_DEFAULT;
 }
 
@@ -497,6 +500,8 @@ void
 pmap_init(void)
 {
+	CTR1(KTR_PMAP, "%s()", __func__);
+
 	ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
 }
 
@@ -604,13 +609,25 @@ pmap_free_rid(uint32_t rid)
  * Page table page management routines.....
  ***************************************************/
 
+static void
+pmap_pinit_common(pmap_t pmap)
+{
+	int i;
+
+	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
+		pmap->pm_rid[i] = pmap_allocate_rid();
+	TAILQ_INIT(&pmap->pm_pvchunk);
+	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+}
+
 void
-pmap_pinit0(struct pmap *pmap)
+pmap_pinit0(pmap_t pmap)
 {
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
 	PMAP_LOCK_INIT(pmap);
-	/* kernel_pmap is the same as any other pmap. */
-	pmap_pinit(pmap);
+	pmap_pinit_common(pmap);
 }
 
 /*
@@ -618,14 +635,12 @@ pmap_pinit0(struct pmap *pmap)
  * such as one in a vmspace structure.
  */
 int
-pmap_pinit(struct pmap *pmap)
+pmap_pinit(pmap_t pmap)
 {
-	int i;
 
-	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
-		pmap->pm_rid[i] = pmap_allocate_rid();
-	TAILQ_INIT(&pmap->pm_pvchunk);
-	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
+	pmap_pinit_common(pmap);
 	return (1);
 }
 
@@ -643,6 +658,8 @@ pmap_release(pmap_t pmap)
 {
 	int i;
 
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
 	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
 		if (pmap->pm_rid[i])
 			pmap_free_rid(pmap->pm_rid[i]);
@@ -658,6 +675,8 @@ pmap_growkernel(vm_offset_t addr)
 	struct ia64_lpte *leaf;
 	vm_page_t nkpg;
 
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, addr);
+
 	while (kernel_vm_end <= addr) {
 		if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64)
 			panic("%s: out of kernel address space", __func__);
@@ -1152,6 +1171,8 @@ pmap_extract(pmap_t pmap, vm_offset_t va
 	pmap_t oldpmap;
 	vm_paddr_t pa;
 
+	CTR3(KTR_PMAP, "%s(pm=%p, va=%#lx)", __func__, pmap, va);
+
 	pa = 0;
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1178,6 +1199,9 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	vm_page_t m;
 	vm_paddr_t pa;
 
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, prot=%#x)", __func__, pmap, va,
+	    prot);
+
 	pa = 0;
 	m = NULL;
 	PMAP_LOCK(pmap);
@@ -1359,6 +1383,8 @@ pmap_kextract(vm_offset_t va)
 	vm_paddr_t pa;
 	u_int idx;
 
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, va);
+
 	KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));
 
 	/* Regions 6 and 7 are direct mapped. */
@@ -1419,6 +1445,8 @@ pmap_qenter(vm_offset_t va, vm_page_t *m
 	struct ia64_lpte *pte;
 	int i;
 
+	CTR4(KTR_PMAP, "%s(va=%#lx, m_p=%p, cnt=%d)", __func__, va, m, count);
+
 	for (i = 0; i < count; i++) {
 		pte = pmap_find_kpte(va);
 		if (pmap_present(pte))
@@ -1442,6 +1470,8 @@ pmap_qremove(vm_offset_t va, int count)
 	struct ia64_lpte *pte;
 	int i;
 
+	CTR3(KTR_PMAP, "%s(va=%#lx, cnt=%d)", __func__, va, count);
+
 	for (i = 0; i < count; i++) {
 		pte = pmap_find_kpte(va);
 		if (pmap_present(pte)) {
@@ -1458,10 +1488,12 @@ pmap_qremove(vm_offset_t va, int count)
  * to not have the PTE reflect that, nor update statistics.
  */
 void
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
 	struct ia64_lpte *pte;
 
+	CTR3(KTR_PMAP, "%s(va=%#lx, pa=%#lx)", __func__, va, pa);
+
 	pte = pmap_find_kpte(va);
 	if (pmap_present(pte))
 		pmap_invalidate_page(va);
@@ -1480,6 +1512,8 @@ pmap_kremove(vm_offset_t va)
 {
 	struct ia64_lpte *pte;
 
+	CTR2(KTR_PMAP, "%s(va=%#lx)", __func__, va);
+
 	pte = pmap_find_kpte(va);
 	if (pmap_present(pte)) {
 		pmap_remove_vhpt(va);
@@ -1503,6 +1537,10 @@ pmap_kremove(vm_offset_t va)
 vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
+
+	CTR5(KTR_PMAP, "%s(va_p=%p, sva=%#lx, eva=%#lx, prot=%#x)", __func__,
+	    virt, start, end, prot);
+
 	return IA64_PHYS_TO_RR7(start);
 }
@@ -1522,6 +1560,9 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 	vm_offset_t va;
 	struct ia64_lpte *pte;
 
+	CTR4(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx)", __func__, pmap, sva,
+	    eva);
+
 	/*
 	 * Perform an unsynchronized read.  This is, however, safe.
 	 */
@@ -1553,13 +1594,14 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
  *		inefficient because they iteratively called
  *		pmap_remove (slow...)
  */
-
 void
 pmap_remove_all(vm_page_t m)
 {
 	pmap_t oldpmap;
 	pv_entry_t pv;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
@@ -1592,6 +1634,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 	pmap_t oldpmap;
 	struct ia64_lpte *pte;
 
+	CTR5(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, prot=%#x)", __func__,
+	    pmap, sva, eva, prot);
+
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
 		return;
@@ -1658,6 +1703,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	struct ia64_lpte *pte;
 	boolean_t icache_inval, managed;
 
+	CTR6(KTR_PMAP, "pmap_enter(pm=%p, va=%#lx, acc=%#x, m=%p, prot=%#x, "
+	    "wired=%u)", pmap, va, access, m, prot, wired);
+
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1789,6 +1837,9 @@ pmap_enter_object(pmap_t pmap, vm_offset
 	vm_page_t m;
 	vm_pindex_t diff, psize;
 
+	CTR6(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, m=%p, prot=%#x)",
+	    __func__, pmap, start, end, m_start, prot);
+
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
 	psize = atop(end - start);
@@ -1813,12 +1864,14 @@ pmap_enter_object(pmap_t pmap, vm_offset
  * 4. No page table pages.
  * but is *MUCH* faster than pmap_enter...
  */
-
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 	pmap_t oldpmap;
 
+	CTR5(KTR_PMAP, "%s(pm=%p, va=%#lx, m=%p, prot=%#x)", __func__, pmap,
+	    va, m, prot);
+
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1876,11 +1929,13 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
  * faults on process startup and immediately after an mmap.
  */
 void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
-    vm_object_t object, vm_pindex_t pindex,
-    vm_size_t size)
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+    vm_pindex_t pindex, vm_size_t size)
 {
+	CTR6(KTR_PMAP, "%s(pm=%p, va=%#lx, obj=%p, idx=%lu, sz=%#lx)",
+	    __func__, pmap, addr, object, pindex, size);
+
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 	    ("pmap_object_init_pt: non-device object"));
@@ -1894,14 +1949,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
  * The mapping must already exist in the pmap.
  */
 void
-pmap_change_wiring(pmap, va, wired)
-	register pmap_t pmap;
-	vm_offset_t va;
-	boolean_t wired;
+pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
 	pmap_t oldpmap;
 	struct ia64_lpte *pte;
 
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, wired=%u)", __func__, pmap, va,
+	    wired);
+
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -1919,8 +1974,6 @@ pmap_change_wiring(pmap, va, wired)
 	PMAP_UNLOCK(pmap);
 }
 
-
-
 /*
  * Copy the range specified by src_addr/len
  * from the source map to the range dst_addr/len
@@ -1928,30 +1981,31 @@ pmap_change_wiring(pmap, va, wired)
  *
  * This routine is only advisory and need not do anything.
  */
-
 void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
-    vm_offset_t src_addr)
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_va, vm_size_t len,
+    vm_offset_t src_va)
 {
-}
+	CTR6(KTR_PMAP, "%s(dpm=%p, spm=%p, dva=%#lx, sz=%#lx, sva=%#lx)",
+	    __func__, dst_pmap, src_pmap, dst_va, len, src_va);
+}
 
 /*
  * pmap_zero_page zeros the specified hardware page by
  * mapping it into virtual memory and using bzero to clear
  * its contents.
  */
-
 void
 pmap_zero_page(vm_page_t m)
 {
 	void *p;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	p = (void *)pmap_page_to_va(m);
 	bzero(p, PAGE_SIZE);
 }
 
-
 /*
  * pmap_zero_page_area zeros the specified hardware page by
  * mapping it into virtual memory and using bzero to clear
@@ -1959,33 +2013,33 @@ pmap_zero_page(vm_page_t m)
  *
  * off and size must reside within a single page.
  */
-
 void
 pmap_zero_page_area(vm_page_t m, int off, int size)
 {
 	char *p;
 
+	CTR4(KTR_PMAP, "%s(m=%p, ofs=%d, len=%d)", __func__, m, off, size);
+
 	p = (void *)pmap_page_to_va(m);
 	bzero(p + off, size);
 }
 
-
 /*
  * pmap_zero_page_idle zeros the specified hardware page by
  * mapping it into virtual memory and using bzero to clear
  * its contents.  This is for the vm_idlezero process.
 */
-
 void
 pmap_zero_page_idle(vm_page_t m)
 {
 	void *p;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	p = (void *)pmap_page_to_va(m);
 	bzero(p, PAGE_SIZE);
 }
 
-
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
@@ -1997,13 +2051,13 @@ pmap_copy_page(vm_page_t msrc, vm_page_t
 {
 	void *dst, *src;
 
+	CTR3(KTR_PMAP, "%s(sm=%p, dm=%p)", __func__, msrc, mdst);
+
 	src = (void *)pmap_page_to_va(msrc);
 	dst = (void *)pmap_page_to_va(mdst);
 	bcopy(src, dst, PAGE_SIZE);
 }
 
-int unmapped_buf_allowed;
-
 void
 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
     vm_offset_t b_offset, int xfersize)
@@ -2012,6 +2066,9 @@ pmap_copy_pages(vm_page_t ma[], vm_offse
 	vm_offset_t a_pg_offset, b_pg_offset;
 	int cnt;
 
+	CTR6(KTR_PMAP, "%s(m0=%p, va0=%#lx, m1=%p, va1=%#lx, sz=%#x)",
+	    __func__, ma, a_offset, mb, b_offset, xfersize);
+
 	while (xfersize > 0) {
 		a_pg_offset = a_offset & PAGE_MASK;
 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
@@ -2042,6 +2099,8 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
 	int loops = 0;
 	boolean_t rv;
 
+	CTR3(KTR_PMAP, "%s(pm=%p, m=%p)", __func__, pmap, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
@@ -2073,6 +2132,8 @@ pmap_page_wired_mappings(vm_page_t m)
 	pv_entry_t pv;
 	int count;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
@@ -2112,6 +2173,8 @@ pmap_remove_pages(pmap_t pmap)
 	u_long inuse, bitmask;
 	int allfree, bit, field, idx;
 
+	CTR2(KTR_PMAP, "%s(pm=%p)", __func__, pmap);
+
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
@@ -2179,6 +2242,8 @@ pmap_ts_referenced(vm_page_t m)
 	pv_entry_t pv;
 	int count = 0;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
@@ -2214,6 +2279,8 @@ pmap_is_modified(vm_page_t m)
 	pv_entry_t pv;
 	boolean_t rv;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 	rv = FALSE;
@@ -2254,6 +2321,8 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 {
 	struct ia64_lpte *pte;
 
+	CTR3(KTR_PMAP, "%s(pm=%p, va=%#lx)", __func__, pmap, addr);
+
 	pte = pmap_find_vhpt(addr);
 	if (pte != NULL && pmap_present(pte))
 		return (FALSE);
@@ -2274,6 +2343,8 @@ pmap_is_referenced(vm_page_t m)
 	pv_entry_t pv;
 	boolean_t rv;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
@@ -2306,6 +2377,9 @@ pmap_advise(pmap_t pmap, vm_offset_t sva
 	pmap_t oldpmap;
 	vm_page_t m;
 
+	CTR5(KTR_PMAP, "%s(pm=%p, sva=%#lx, eva=%#lx, adv=%d)", __func__,
+	    pmap, sva, eva, advice);
+
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
 	for (; sva < eva; sva += PAGE_SIZE) {
@@ -2348,6 +2422,8 @@ pmap_clear_modify(vm_page_t m)
 	pmap_t oldpmap, pmap;
 	pv_entry_t pv;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
@@ -2389,6 +2465,8 @@ pmap_remove_write(vm_page_t m)
 	pv_entry_t pv;
 	vm_prot_t prot;
 
+	CTR2(KTR_PMAP, "%s(m=%p)", __func__, m);
+
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
@@ -2425,20 +2503,13 @@ pmap_remove_write(vm_page_t m)
 	rw_wunlock(&pvh_global_lock);
 }
 
-/*
- * Map a set of physical memory pages into the kernel virtual
- * address space. Return a pointer to where it is mapped. This
- * routine is intended to be used for mapping device memory,
- * NOT real memory.
- */
-void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
+vm_offset_t
+pmap_mapdev_priv(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
 {
-	static void *last_va = NULL;
-	static vm_paddr_t last_pa = 0;
+	static vm_offset_t last_va = 0;
+	static vm_paddr_t last_pa = ~0UL;
 	static vm_size_t last_sz = 0;
 	struct efi_md *md;
-	vm_offset_t va;
 
 	if (pa == last_pa && sz == last_sz)
 		return (last_va);
@@ -2447,30 +2518,48 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t sz)
 	if (md == NULL) {
 		printf("%s: [%#lx..%#lx] not covered by memory descriptor\n",
 		    __func__, pa, pa + sz - 1);
-		return ((void *)IA64_PHYS_TO_RR6(pa));
+		return (IA64_PHYS_TO_RR6(pa));
 	}
 
 	if (md->md_type == EFI_MD_TYPE_FREE) {
 		printf("%s: [%#lx..%#lx] is in DRAM\n", __func__, pa,
 		    pa + sz - 1);
-		return (NULL);
+		return (0);
 	}
 
-	va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
+	last_va = (md->md_attr & EFI_MD_ATTR_WB) ? IA64_PHYS_TO_RR7(pa) :
 	    IA64_PHYS_TO_RR6(pa);
-
-	last_va = (void *)va;
 	last_pa = pa;
 	last_sz = sz;
 	return (last_va);
 }
 
 /*
- * 'Unmap' a range mapped by pmap_mapdev().
+ * Map a set of physical memory pages into the kernel virtual
+ * address space. Return a pointer to where it is mapped. This
+ * routine is intended to be used for mapping device memory,
+ * NOT real memory.
+ */
+void *
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t sz, vm_memattr_t attr)
+{
+	vm_offset_t va;
+
+	CTR4(KTR_PMAP, "%s(pa=%#lx, sz=%#lx, attr=%#x)", __func__, pa, sz,
+	    attr);
+
+	va = pmap_mapdev_priv(pa, sz, attr);
+	return ((void *)(uintptr_t)va);
+}
+
+/*
+ * 'Unmap' a range mapped by pmap_mapdev_attr().
  */
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
+
+	CTR3(KTR_PMAP, "%s(va=%#lx, sz=%#lx)", __func__, va, size);
 }
 
 /*
@@ -2496,6 +2585,8 @@ pmap_page_set_memattr(vm_page_t m, vm_me
 	pv_entry_t pv;
 	void *va;
 
+	CTR3(KTR_PMAP, "%s(m=%p, attr=%#x)", __func__, m, ma);
+
 	rw_wlock(&pvh_global_lock);
 	m->md.memattr = ma;
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2542,6 +2633,9 @@ pmap_mincore(pmap_t pmap, vm_offset_t ad
 	vm_paddr_t pa;
 	int val;
 
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, pa_p=%p)", __func__, pmap, addr,
+	    locked_pa);
+
 	PMAP_LOCK(pmap);
 retry:
 	oldpmap = pmap_switch(pmap);
@@ -2574,9 +2668,15 @@ out:
 	return (val);
 }
 
+/*
+ *
+ */
 void
 pmap_activate(struct thread *td)
 {
+
+	CTR2(KTR_PMAP, "%s(td=%p)", __func__, td);
+
 	pmap_switch(vmspace_pmap(td->td_proc->p_vmspace));
 }
 
@@ -2609,6 +2709,9 @@ out:
 	return (prevpm);
 }
 
+/*
+ *
+ */
 void
 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {
@@ -2617,6 +2720,8 @@ pmap_sync_icache(pmap_t pm, vm_offset_t 
 	vm_offset_t lim;
 	vm_size_t len;
 
+	CTR4(KTR_PMAP, "%s(pm=%p, va=%#lx, sz=%#lx)", __func__, pm, va, sz);
+
 	sz += va & 31;
 	va &= ~31;
 	sz = (sz + 31) & ~31;
@@ -2644,6 +2749,9 @@ void
 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
     vm_offset_t *addr, vm_size_t size)
 {
+
+	CTR5(KTR_PMAP, "%s(obj=%p, ofs=%#lx, va_p=%p, sz=%#lx)", __func__,
+	    object, offset, addr, size);
 }
 
 #include "opt_ddb.h"

Modified: stable/10/sys/ia64/include/pmap.h
==============================================================================
--- stable/10/sys/ia64/include/pmap.h	Wed Jul  2 23:47:43 2014	(r268200)
+++ stable/10/sys/ia64/include/pmap.h	Wed Jul  2 23:57:55 2014	(r268201)
@@ -114,24 +114,29 @@ extern vm_offset_t virtual_end;
 extern uint64_t pmap_vhpt_base[];
 extern int pmap_vhpt_log2size;
 
+#define	pmap_mapbios(pa,sz)	pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define	pmap_mapdev(pa,sz)	pmap_mapdev_attr(pa,sz,VM_MEMATTR_UNCACHEABLE)
+#define	pmap_unmapbios(va,sz)	pmap_unmapdev(va,sz)
+
 #define	pmap_page_get_memattr(m)	((m)->md.memattr)
-#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
+#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
-#define	pmap_mapbios(pa, sz)	pmap_mapdev(pa, sz)
-#define	pmap_unmapbios(va, sz)	pmap_unmapdev(va, sz)
 
-vm_offset_t pmap_alloc_vhpt(void);
-void	pmap_bootstrap(void);
-void	pmap_invalidate_all(void);
-void	pmap_kenter(vm_offset_t va, vm_offset_t pa);
+void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
-void	*pmap_mapdev(vm_paddr_t, vm_size_t);
+void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void	pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+void	pmap_unmapdev(vm_offset_t, vm_size_t);
+
+/* Machine-architecture private */
+vm_offset_t pmap_alloc_vhpt(void);
+void	pmap_bootstrap(void);
+void	pmap_invalidate_all(void);
+vm_offset_t pmap_mapdev_priv(vm_paddr_t, vm_size_t, vm_memattr_t);
 vm_offset_t pmap_page_to_va(vm_page_t);
 vm_offset_t pmap_steal_memory(vm_size_t);
 struct pmap *pmap_switch(struct pmap *pmap);
-void	pmap_unmapdev(vm_offset_t, vm_size_t);
 
 #endif /* _KERNEL */
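A note on the pmap_mapdev() rework above: the old spelling survives as a
macro forwarding to pmap_mapdev_attr() with VM_MEMATTR_UNCACHEABLE, while
pmap_mapdev_priv() returns a bare vm_offset_t so that early boot code such
as ia64_init() can use it directly. A hypothetical caller -- the "foo"
device and function name are invented for illustration -- therefore
compiles unchanged after this change:

    /*
     * Sketch only: "foo" is a made-up device. With the header change
     * above, pmap_mapdev(pa, sz) expands to
     * pmap_mapdev_attr(pa, sz, VM_MEMATTR_UNCACHEABLE), so existing
     * device-memory mappings keep their uncacheable attribute.
     */
    #include <sys/param.h>
    #include <sys/systm.h>
    #include <vm/vm.h>
    #include <vm/pmap.h>

    static void *
    foo_map_regs(vm_paddr_t pa, vm_size_t sz)
    {

    	/* NULL (0 from pmap_mapdev_priv()) means the range is DRAM. */
    	return (pmap_mapdev(pa, sz));
    }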