Date:      Sat, 6 Aug 2011 15:59:54 +0000 (UTC)
From:      Marcel Moolenaar <marcel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r224680 - head/lib/libkvm
Message-ID:  <201108061559.p76FxsAT057902@svn.freebsd.org>

Author: marcel
Date: Sat Aug  6 15:59:54 2011
New Revision: 224680
URL: http://svn.freebsd.org/changeset/base/224680

Log:
  Add support for PBVM addresses. In a nutshell this means:
  o   get the physical address and size of the PBVM page table. These
      are found in the bootinfo structure, whose physical address is
      recorded as the ELF entry point.
  o   translate region 4 virtual addresses to physical addresses using
      the PBVM page table.
  
  In _kvm_kvatop() make the distinction between physical address and
  core file offset a little clearer to avoid confusion. To further
  enhance readability, always store the translated address into pa
  so that it's obvious how the translation from va to pa happened.
  
  Approved by:	re (blanket)
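
  For illustration, a minimal standalone sketch of the region 4 (PBVM)
  translation described above. PBVM_BASE and PBVM_PGSZ mirror the values
  added in the diff below; the PTE_PRESENT and PTE_PPN_MASK values here are
  placeholders standing in for the real definitions in <machine/pte.h>, and
  the flat page-table layout (one 8-byte entry per 64KB page) is assumed
  from the bounds check in _kvm_kvatop().

/*
 * Sketch of the region 4 (PBVM) lookup: a flat table of 8-byte PTEs,
 * one per 64KB page, indexed by the offset from PBVM_BASE.
 */
#include <stddef.h>
#include <stdint.h>

#define	PBVM_BASE	0x9ffc000000000000UL	/* start of region 4 PBVM */
#define	PBVM_PGSZ	(64 * 1024)		/* PBVM page size */
/* Placeholder PTE bits; the kernel's definitions live in <machine/pte.h>. */
#define	PTE_PRESENT	0x0000000000000001UL
#define	PTE_PPN_MASK	0x0003fffffffff000UL

static int
pbvm_vatopa(const uint64_t *pgtbl, size_t pgtblsz, uint64_t va, uint64_t *pa)
{
	uint64_t pte;
	size_t idx;

	va -= PBVM_BASE;			/* offset into the PBVM region */
	idx = va / PBVM_PGSZ;			/* one PTE per 64KB page */
	if (idx >= pgtblsz / sizeof(*pgtbl))
		return (-1);			/* beyond the loaded page table */
	pte = pgtbl[idx];
	if (!(pte & PTE_PRESENT))
		return (-1);			/* page not mapped */
	*pa = (pte & PTE_PPN_MASK) + va % PBVM_PGSZ;
	return (0);
}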

Modified:
  head/lib/libkvm/kvm_ia64.c

Modified: head/lib/libkvm/kvm_ia64.c
==============================================================================
--- head/lib/libkvm/kvm_ia64.c	Sat Aug  6 14:25:11 2011	(r224679)
+++ head/lib/libkvm/kvm_ia64.c	Sat Aug  6 15:59:54 2011	(r224680)
@@ -33,6 +33,7 @@
 #include <sys/mman.h>
 
 #include <machine/atomic.h>
+#include <machine/bootinfo.h>
 #include <machine/pte.h>
 
 #include <kvm.h>
@@ -51,11 +52,16 @@
 #define	KPTE_DIR0_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps))
 #define	KPTE_DIR1_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps))
 
+#define	PBVM_BASE		0x9ffc000000000000UL
+#define	PBVM_PGSZ		(64 * 1024)
+
 struct vmstate {
 	void	*mmapbase;
 	size_t	mmapsize;
 	size_t	pagesize;
 	u_long	kptdir;
+	u_long	*pbvm_pgtbl;
+	u_int	pbvm_pgtblsz;
 };
 
 /*
@@ -110,11 +116,28 @@ _kvm_pa2off(kvm_t *kd, uint64_t pa, off_
 	return (pgsz - ((size_t)pa & (pgsz - 1)));
 }
 
+static ssize_t
+_kvm_read_phys(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
+{
+	off_t ofs;
+	size_t sz;
+
+	sz = _kvm_pa2off(kd, pa, &ofs, 0);
+	if (sz < bufsz)
+		return ((ssize_t)sz);
+
+	if (lseek(kd->pmfd, ofs, 0) == -1)
+		return (-1);
+	return (read(kd->pmfd, buf, bufsz));
+}
+
 void
 _kvm_freevtop(kvm_t *kd)
 {
 	struct vmstate *vm = kd->vmst;
 
+	if (vm->pbvm_pgtbl != NULL)
+		free(vm->pbvm_pgtbl);
 	if (vm->mmapbase != NULL)
 		munmap(vm->mmapbase, vm->mmapsize);
 	free(vm);
@@ -124,10 +147,12 @@ _kvm_freevtop(kvm_t *kd)
 int
 _kvm_initvtop(kvm_t *kd)
 {
+	struct bootinfo bi;
 	struct nlist nl[2];
 	uint64_t va;
 	Elf64_Ehdr *ehdr;
 	size_t hdrsz;
+	ssize_t sz;
 
 	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
 	if (kd->vmst == NULL) {
@@ -146,6 +171,42 @@ _kvm_initvtop(kvm_t *kd)
 		return (-1);
 
 	/*
+	 * Load the PBVM page table. We need this to resolve PBVM addresses.
+	 * The PBVM page table is obtained from the bootinfo structure, of
+	 * which the physical address is given to us in e_entry. If e_entry
+	 * is 0, then this is assumed to be a pre-PBVM kernel.
+	 */
+	if (ehdr->e_entry != 0) {
+		sz = _kvm_read_phys(kd, ehdr->e_entry, &bi, sizeof(bi));
+		if (sz != sizeof(bi)) {
+			_kvm_err(kd, kd->program,
+			    "cannot read bootinfo from PA %#lx", ehdr->e_entry);
+			return (-1);
+		}
+		if (bi.bi_magic != BOOTINFO_MAGIC) {
+			_kvm_err(kd, kd->program, "invalid bootinfo");
+			return (-1);
+		}
+		kd->vmst->pbvm_pgtbl = _kvm_malloc(kd, bi.bi_pbvm_pgtblsz);
+		if (kd->vmst->pbvm_pgtbl == NULL) {
+			_kvm_err(kd, kd->program, "cannot allocate page table");
+			return (-1);
+		}
+		kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz;
+		sz = _kvm_read_phys(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
+		    bi.bi_pbvm_pgtblsz);
+		if (sz != bi.bi_pbvm_pgtblsz) {
+			_kvm_err(kd, kd->program,
+			    "cannot read page table from PA %#lx",
+			    bi.bi_pbvm_pgtbl);
+			return (-1);
+		}
+	} else {
+		kd->vmst->pbvm_pgtbl = NULL;
+		kd->vmst->pbvm_pgtblsz = 0;
+	}
+
+	/*
 	 * At this point we've got enough information to use kvm_read() for
 	 * direct mapped (ie region 6 and region 7) address, such as symbol
 	 * addresses/values.
@@ -174,17 +235,18 @@ _kvm_initvtop(kvm_t *kd)
 }
 
 int
-_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
+_kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs)
 {
 	struct ia64_lpte pte;
-	uint64_t pgaddr, pt0addr, pt1addr;
+	uint64_t pa, pgaddr, pt0addr, pt1addr;
 	size_t pgno, pgsz, pt0no, pt1no;
 
 	if (va >= REGION_BASE(6)) {
 		/* Regions 6 and 7: direct mapped. */
-		return (_kvm_pa2off(kd, REGION_ADDR(va), pa, 0));
+		pa = REGION_ADDR(va);
+		return (_kvm_pa2off(kd, pa, ofs, 0));
 	} else if (va >= REGION_BASE(5)) {
-		/* Region 5: virtual. */
+		/* Region 5: Kernel Virtual Memory. */
 		va = REGION_ADDR(va);
 		pgsz = kd->vmst->pagesize;
 		pt0no = KPTE_DIR0_INDEX(va, pgsz);
@@ -207,12 +269,24 @@ _kvm_kvatop(kvm_t *kd, u_long va, off_t 
 			goto fail;
 		if (!(pte.pte & PTE_PRESENT))
 			goto fail;
-		va = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
-		return (_kvm_pa2off(kd, va, pa, pgsz));
+		pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
+		return (_kvm_pa2off(kd, pa, ofs, pgsz));
+	} else if (va >= PBVM_BASE) {
+		/* Region 4: Pre-Boot Virtual Memory (PBVM). */
+		va -= PBVM_BASE;
+		pgsz = PBVM_PGSZ;
+		pt0no = va / pgsz;
+		if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
+			goto fail;
+		pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
+		if (!(pt0addr & PTE_PRESENT))
+			goto fail;
+		pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
+		return (_kvm_pa2off(kd, pa, ofs, pgsz));
 	}
 
  fail:
 	_kvm_err(kd, kd->program, "invalid kernel virtual address");
-	*pa = ~0UL;
+	*ofs = ~0UL;
 	return (0);
 }
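
A consumer-side sketch, assuming a matching ia64 kernel and vmcore: the
existing public libkvm calls kvm_openfiles(), kvm_nlist(), kvm_read() and
kvm_geterr() exercise _kvm_initvtop() and _kvm_kvatop() above. The symbol
name "_dumptid" is only a placeholder; if the resolved address falls in
region 4, kvm_read() now goes through the PBVM page table loaded from the
bootinfo structure.

#include <sys/param.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;
	long val;

	if (argc != 3) {
		fprintf(stderr, "usage: %s kernel vmcore\n", argv[0]);
		return (1);
	}

	/* Open the kernel image and the crash dump read-only. */
	kd = kvm_openfiles(argv[1], argv[2], NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}

	/* Resolve a kernel symbol; "_dumptid" is a placeholder name. */
	nl[0].n_name = "_dumptid";
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
		fprintf(stderr, "symbol lookup failed\n");
		kvm_close(kd);
		return (1);
	}

	/*
	 * kvm_read() translates the virtual address via _kvm_kvatop();
	 * with this commit, a region 4 address resolves through the
	 * PBVM page table loaded in _kvm_initvtop().
	 */
	if (kvm_read(kd, nl[0].n_value, &val, sizeof(val)) !=
	    (ssize_t)sizeof(val)) {
		fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	printf("value at %#lx: %ld\n", (unsigned long)nl[0].n_value, val);
	kvm_close(kd);
	return (0);
}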


