From owner-svn-src-all@FreeBSD.ORG Sat Aug 2 22:25:25 2014
Message-Id: <201408022225.s72MPOsB016650@svn.freebsd.org>
From: Marcel Moolenaar
Date: Sat, 2 Aug 2014 22:25:24 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-stable@freebsd.org, svn-src-stable-10@freebsd.org
Subject: svn commit: r269449 - stable/10/lib/libkvm

Author: marcel
Date: Sat Aug 2 22:25:24 2014
New Revision: 269449
URL: http://svnweb.freebsd.org/changeset/base/269449

Log:
  MFC 259910, 260023, 260028, 260600 & 260701:
  o Fix "kptdir is itself virtual" error, caused by having the kptdir in PBVM.
  o Allow building a cross libkvm for ia64.
  o Add support for virtual cores (aka minidumps).
  o We don't have to worry about page sizes when working on virtual cores.
  o Handle truncation of the size returned by _kvm_kvatop().

Modified:
  stable/10/lib/libkvm/kvm_ia64.c

Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/lib/libkvm/kvm_ia64.c
==============================================================================
--- stable/10/lib/libkvm/kvm_ia64.c    Sat Aug 2 21:36:40 2014    (r269448)
+++ stable/10/lib/libkvm/kvm_ia64.c    Sat Aug 2 22:25:24 2014    (r269449)
@@ -32,12 +32,21 @@
 #include 
 #include 
 
+#ifndef CROSS_LIBKVM
 #include 
 #include 
+#include 
 #include 
+#else
+#include "../../sys/ia64/include/atomic.h"
+#include "../../sys/ia64/include/bootinfo.h"
+#include "../../sys/ia64/include/elf.h"
+#include "../../sys/ia64/include/pte.h"
+#endif
 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -55,6 +64,8 @@
 #define PBVM_BASE   0x9ffc000000000000UL
 #define PBVM_PGSZ   (64 * 1024)
 
+typedef size_t (a2p_f)(kvm_t *, uint64_t, off_t *);
+
 struct vmstate {
         void    *mmapbase;
         size_t  mmapsize;
@@ -62,6 +73,7 @@ struct vmstate {
         u_long  kptdir;
         u_long  *pbvm_pgtbl;
         u_int   pbvm_pgtblsz;
+        a2p_f   *kvatop;
 };
 
 /*
@@ -70,7 +82,7 @@ struct vmstate {
  * set of headers.
  */
 static int
-_kvm_maphdrs(kvm_t *kd, size_t sz)
+ia64_maphdrs(kvm_t *kd, size_t sz)
 {
         struct vmstate *vm = kd->vmst;
 
@@ -91,38 +103,103 @@ _kvm_maphdrs(kvm_t *kd, size_t sz)
 }
 
 /*
- * Translate a physical memory address to a file-offset in the crash-dump.
+ * Physical core support.
  */
+
 static size_t
-_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
+phys_addr2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
 {
-        Elf64_Ehdr *e = kd->vmst->mmapbase;
-        Elf64_Phdr *p = (Elf64_Phdr*)((char*)e + e->e_phoff);
-        int n = e->e_phnum;
-
-        if (pa != REGION_ADDR(pa)) {
-                _kvm_err(kd, kd->program, "internal error");
-                return (0);
-        }
-
+        Elf64_Ehdr *e;
+        Elf64_Phdr *p;
+        int n;
+
+        if (pa != REGION_ADDR(pa))
+                goto fail;
+
+        e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
+        n = e->e_phnum;
+        p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
         while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
                 p++, n--;
         if (n == 0)
-                return (0);
+                goto fail;
 
         *ofs = (pa - p->p_paddr) + p->p_offset;
         if (pgsz == 0)
                 return (p->p_memsz - (pa - p->p_paddr));
         return (pgsz - ((size_t)pa & (pgsz - 1)));
+
+ fail:
+        _kvm_err(kd, kd->program, "invalid physical address %#jx",
+            (uintmax_t)pa);
+        return (0);
+}
+
+static size_t
+phys_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
+{
+        struct ia64_lpte pte;
+        uint64_t pa, pgaddr, pt0addr, pt1addr;
+        size_t pgno, pgsz, pt0no, pt1no;
+
+        if (va >= REGION_BASE(6)) {
+                /* Regions 6 and 7: direct mapped. */
+                pa = REGION_ADDR(va);
+                return (phys_addr2off(kd, pa, ofs, 0));
+        } else if (va >= REGION_BASE(5)) {
+                /* Region 5: Kernel Virtual Memory. */
+                va = REGION_ADDR(va);
+                pgsz = kd->vmst->pagesize;
+                pt0no = KPTE_DIR0_INDEX(va, pgsz);
+                pt1no = KPTE_DIR1_INDEX(va, pgsz);
+                pgno = KPTE_PTE_INDEX(va, pgsz);
+                if (pt0no >= NKPTEDIR(pgsz))
+                        goto fail;
+                pt0addr = kd->vmst->kptdir + (pt0no << 3);
+                if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
+                        goto fail;
+                if (pt1addr == 0)
+                        goto fail;
+                pt1addr += pt1no << 3;
+                if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
+                        goto fail;
+                if (pgaddr == 0)
+                        goto fail;
+                pgaddr += pgno * sizeof(pte);
+                if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
+                        goto fail;
+                if (!(pte.pte & PTE_PRESENT))
+                        goto fail;
+                pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
+                return (phys_addr2off(kd, pa, ofs, pgsz));
+        } else if (va >= PBVM_BASE) {
+                /* Region 4: Pre-Boot Virtual Memory (PBVM). */
+                va -= PBVM_BASE;
+                pgsz = PBVM_PGSZ;
+                pt0no = va / pgsz;
+                if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
+                        goto fail;
+                pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
+                if (!(pt0addr & PTE_PRESENT))
+                        goto fail;
+                pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
+                return (phys_addr2off(kd, pa, ofs, pgsz));
+        }
+
+ fail:
+        _kvm_err(kd, kd->program, "invalid kernel virtual address %#jx",
+            (uintmax_t)va);
+        *ofs = -1;
+        return (0);
 }
 
 static ssize_t
-_kvm_read_phys(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
+phys_read(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
 {
         off_t ofs;
         size_t sz;
 
-        sz = _kvm_pa2off(kd, pa, &ofs, 0);
+        sz = phys_addr2off(kd, pa, &ofs, 0);
         if (sz < bufsz)
                 return ((ssize_t)sz);
 
@@ -131,6 +208,50 @@ _kvm_read_phys(kvm_t *kd, uint64_t pa, v
         return (read(kd->pmfd, buf, bufsz));
 }
 
+/*
+ * Virtual core support (aka minidump).
+ */
+
+static size_t
+virt_addr2off(kvm_t *kd, uint64_t va, off_t *ofs, size_t pgsz)
+{
+        Elf64_Ehdr *e;
+        Elf64_Phdr *p;
+        int n;
+
+        if (va < REGION_BASE(4))
+                goto fail;
+
+        e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
+        n = e->e_phnum;
+        p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
+        while (n && (va < p->p_vaddr || va >= p->p_vaddr + p->p_memsz))
+                p++, n--;
+        if (n == 0)
+                goto fail;
+
+        *ofs = (va - p->p_vaddr) + p->p_offset;
+        if (pgsz == 0)
+                return (p->p_memsz - (va - p->p_vaddr));
+        return (pgsz - ((size_t)va & (pgsz - 1)));
+
+ fail:
+        _kvm_err(kd, kd->program, "invalid virtual address %#jx",
+            (uintmax_t)va);
+        return (0);
+}
+
+static size_t
+virt_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
+{
+
+        return (virt_addr2off(kd, va, ofs, 0));
+}
+
+/*
+ * KVM architecture support functions.
+ */
+
 void
 _kvm_freevtop(kvm_t *kd)
 {
@@ -160,27 +281,37 @@ _kvm_initvtop(kvm_t *kd)
                 return (-1);
         }
 
+#ifndef CROSS_LIBKVM
         kd->vmst->pagesize = getpagesize();
+#else
+        kd->vmst->pagesize = 8192;
+#endif
 
-        if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
+        if (ia64_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
                 return (-1);
 
         ehdr = kd->vmst->mmapbase;
         hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
-        if (_kvm_maphdrs(kd, hdrsz) == -1)
+        if (ia64_maphdrs(kd, hdrsz) == -1)
                 return (-1);
 
+        kd->vmst->kvatop = (ehdr->e_flags & EF_IA_64_ABSOLUTE) ?
+            phys_kvatop : virt_kvatop;
+
         /*
          * Load the PBVM page table. We need this to resolve PBVM addresses.
          * The PBVM page table is obtained from the bootinfo structure, of
-         * which the physical address is given to us in e_entry. If e_entry
-         * is 0, then this is assumed to be a pre-PBVM kernel.
+         * which the address is given to us in e_entry. If e_entry is 0, then
+         * this is assumed to be a pre-PBVM kernel.
+         * Note that the address of the bootinfo structure is either physical
+         * or virtual, depending on whether the core is physical or virtual.
          */
-        if (ehdr->e_entry != 0) {
-                sz = _kvm_read_phys(kd, ehdr->e_entry, &bi, sizeof(bi));
+        if (ehdr->e_entry != 0 && (ehdr->e_flags & EF_IA_64_ABSOLUTE) != 0) {
+                sz = phys_read(kd, ehdr->e_entry, &bi, sizeof(bi));
                 if (sz != sizeof(bi)) {
                         _kvm_err(kd, kd->program,
-                            "cannot read bootinfo from PA %#lx", ehdr->e_entry);
+                            "cannot read bootinfo at physical address %#jx",
+                            (uintmax_t)ehdr->e_entry);
                         return (-1);
                 }
                 if (bi.bi_magic != BOOTINFO_MAGIC) {
@@ -193,12 +324,12 @@ _kvm_initvtop(kvm_t *kd)
                         return (-1);
                 }
                 kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz;
-                sz = _kvm_read_phys(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
+                sz = phys_read(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
                     bi.bi_pbvm_pgtblsz);
                 if (sz != bi.bi_pbvm_pgtblsz) {
                         _kvm_err(kd, kd->program,
-                            "cannot read page table from PA %#lx",
-                            bi.bi_pbvm_pgtbl);
+                            "cannot read page table at physical address %#jx",
+                            (uintmax_t)bi.bi_pbvm_pgtbl);
                         return (-1);
                 }
         } else {
@@ -225,7 +356,7 @@ _kvm_initvtop(kvm_t *kd)
                 return (-1);
         }
 
-        if (va < REGION_BASE(6)) {
+        if (va == REGION_BASE(5)) {
                 _kvm_err(kd, kd->program, "kptdir is itself virtual");
                 return (-1);
         }
@@ -237,56 +368,8 @@ _kvm_initvtop(kvm_t *kd)
 int
 _kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs)
 {
-        struct ia64_lpte pte;
-        uint64_t pa, pgaddr, pt0addr, pt1addr;
-        size_t pgno, pgsz, pt0no, pt1no;
-
-        if (va >= REGION_BASE(6)) {
-                /* Regions 6 and 7: direct mapped. */
-                pa = REGION_ADDR(va);
-                return (_kvm_pa2off(kd, pa, ofs, 0));
-        } else if (va >= REGION_BASE(5)) {
-                /* Region 5: Kernel Virtual Memory. */
-                va = REGION_ADDR(va);
-                pgsz = kd->vmst->pagesize;
-                pt0no = KPTE_DIR0_INDEX(va, pgsz);
-                pt1no = KPTE_DIR1_INDEX(va, pgsz);
-                pgno = KPTE_PTE_INDEX(va, pgsz);
-                if (pt0no >= NKPTEDIR(pgsz))
-                        goto fail;
-                pt0addr = kd->vmst->kptdir + (pt0no << 3);
-                if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
-                        goto fail;
-                if (pt1addr == 0)
-                        goto fail;
-                pt1addr += pt1no << 3;
-                if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
-                        goto fail;
-                if (pgaddr == 0)
-                        goto fail;
-                pgaddr += pgno * sizeof(pte);
-                if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
-                        goto fail;
-                if (!(pte.pte & PTE_PRESENT))
-                        goto fail;
-                pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
-                return (_kvm_pa2off(kd, pa, ofs, pgsz));
-        } else if (va >= PBVM_BASE) {
-                /* Region 4: Pre-Boot Virtual Memory (PBVM). */
-                va -= PBVM_BASE;
-                pgsz = PBVM_PGSZ;
-                pt0no = va / pgsz;
-                if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
-                        goto fail;
-                pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
-                if (!(pt0addr & PTE_PRESENT))
-                        goto fail;
-                pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
-                return (_kvm_pa2off(kd, pa, ofs, pgsz));
-        }
+        size_t sz;
 
- fail:
-        _kvm_err(kd, kd->program, "invalid kernel virtual address");
-        *ofs = ~0UL;
-        return (0);
+        sz = kd->vmst->kvatop(kd, va, ofs);
+        return ((sz > INT_MAX) ? INT_MAX : sz);
 }
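
For context, a minimal sketch (not part of the commit) of how a libkvm consumer ends up on the
translation path changed above: every kvm_read() against a saved ia64 core funnels through
_kvm_kvatop(), which after this change dispatches to phys_kvatop() or virt_kvatop() via the new
kvatop hook depending on whether the core is physical or virtual. The kernel path, core path and
symbol name below are placeholders, not values taken from the commit.

/*
 * Hypothetical usage example: open a saved ia64 crash dump and read one
 * kernel variable.  The paths and the "_hz" symbol are placeholders.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(void)
{
        char errbuf[_POSIX2_LINE_MAX];
        struct nlist nl[] = { { .n_name = "_hz" }, { .n_name = NULL } };
        kvm_t *kd;
        int hz;

        /* Open the kernel image and the core; libkvm picks kvm_ia64.c's hooks. */
        kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
            NULL, O_RDONLY, errbuf);
        if (kd == NULL) {
                fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
                return (1);
        }
        if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
                fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
                kvm_close(kd);
                return (1);
        }
        /* The kernel virtual address in n_value is translated by _kvm_kvatop(). */
        if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
                fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
                kvm_close(kd);
                return (1);
        }
        printf("hz = %d\n", hz);
        kvm_close(kd);
        return (0);
}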