Date:      Tue, 27 Nov 2012 21:45:42 +0000 (UTC)
From:      "Cherry G. Mathew" <cherry@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r243633 - in projects/amd64_xen_pv/sys: amd64/xen conf
Message-ID:  <201211272145.qARLjgoi065808@svn.freebsd.org>

Author: cherry
Date: Tue Nov 27 21:45:42 2012
New Revision: 243633
URL: http://svnweb.freebsd.org/changeset/base/243633

Log:
  Split the pv code out of pmap.c into a separate set of files.
  
   - Use the pv code to keep track of kernel_pmap P->V mappings.
   - Switch to post-boot PTOV(), VTOP() backends for mmu_map.[ch]
  
  This commit gets us a bit further along the boot path.
  
  Approved by: gibbs(implicit)

Added:
  projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.c
  projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.h
Modified:
  projects/amd64_xen_pv/sys/amd64/xen/pmap.c
  projects/amd64_xen_pv/sys/conf/files.amd64
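
In outline, the PTOV()/VTOP() backend switch described in the log is a
function-pointer swap (a condensed sketch of the pmap.c hunks below, not a
verbatim excerpt):

	static uintptr_t (*ptmb_ptov)(vm_paddr_t) = NULL;
	static vm_paddr_t (*ptmb_vtop)(uintptr_t) = NULL;

	/* pmap_bootstrap(): boot pages are offset-mapped, so the plain
	 * PTOV()/VTOP() macros are still valid. */
	ptmb_ptov = boot_ptov;		/* wraps PTOV() */
	ptmb_vtop = boot_vtop;		/* wraps VTOP() */

	/* setup_xen_pagezone() (SYSINIT, after the VM is up): switch to
	 * lookups backed by the pv tracking in pmap_pv.c. */
	ptmb_ptov = xen_vm_ptov;	/* kernel_pmap pv list -> va */
	ptmb_vtop = xen_vm_vtop;	/* kernel_object page lookup -> pa */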

Modified: projects/amd64_xen_pv/sys/amd64/xen/pmap.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Tue Nov 27 21:27:12 2012	(r243632)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Tue Nov 27 21:45:42 2012	(r243633)
@@ -147,6 +147,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/xen/xenvar.h>
 
 #include <amd64/xen/mmu_map.h>
+#include <amd64/xen/pmap_pv.h>
 
 extern vm_offset_t pa_index; /* from machdep.c */
 extern unsigned long physfree; /* from machdep.c */
@@ -187,14 +188,8 @@ static uma_zone_t xen_pagezone;
 static size_t tsz; /* mmu_map.h opaque cookie size */
 static uintptr_t (*ptmb_mappedalloc)(void) = NULL;
 static void (*ptmb_mappedfree)(uintptr_t) = NULL;
-static uintptr_t ptmb_ptov(vm_paddr_t p)
-{
-	return PTOV(p);
-}
-static vm_paddr_t ptmb_vtop(uintptr_t v)
-{
-	return VTOP(v);
-}
+static uintptr_t (*ptmb_ptov)(vm_paddr_t) = NULL;
+static vm_paddr_t (*ptmb_vtop)(uintptr_t) = NULL;
 
 extern uint64_t xenstack; /* The stack Xen gives us at boot */
 extern char *console_page; /* The shared ring for console i/o */
@@ -455,10 +450,21 @@ pmap_xen_bootpages(vm_paddr_t *firstaddr
 	va = vallocpages(firstaddr, 1);
 	PT_SET_MA(va, ma | PG_RW | PG_V | PG_U);
 
-
 	HYPERVISOR_shared_info = (void *) va;
 }
 
+/* Boot time ptov - xen guarantees bootpages to be offset */
+static uintptr_t boot_ptov(vm_paddr_t p)
+{
+	return PTOV(p);
+}
+
+/* Boot time vtop - xen guarantees bootpages to be offset */
+static vm_paddr_t boot_vtop(uintptr_t v)
+{
+	return VTOP(v);
+}
+
 /* alloc from linear mapped boot time virtual address space */
 static uintptr_t
 mmu_alloc(void)
@@ -486,6 +492,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	/* setup mmu_map backend function pointers for boot */
 	ptmb_mappedalloc = mmu_alloc;
 	ptmb_mappedfree = NULL;
+	ptmb_ptov = boot_ptov;
+	ptmb_vtop = boot_vtop;
 
 	create_boot_pagetables(firstaddr);
 
@@ -509,7 +517,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 
 	dump_avail[pa_index + 1] = phys_avail[pa_index] = VTOP(xen_start_info->pt_base);
 	dump_avail[pa_index + 2] = phys_avail[pa_index + 1] = phys_avail[pa_index] +
-		ptoa(xen_start_info->nr_pt_frames - 1);
+		ptoa(xen_start_info->nr_pt_frames);
 	pa_index += 2;
 
 	/* Map in Xen related pages into VA space */
@@ -538,7 +546,8 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	kernel_pmap->pm_pml4 = (pdp_entry_t *)KPML4phys;
 	kernel_pmap->pm_root = NULL;
 	CPU_FILL(&kernel_pmap->pm_active);	/* don't allow deactivation */
-	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
+	pmap_pv_init();
+	pmap_pv_pmap_init(kernel_pmap);
 
 	tsz = mmu_map_t_size();
 
@@ -552,11 +561,13 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	bzero(msgbufp, round_page(msgbufsize));
 }
 
+/*
+ *	Initialize a vm_page's machine-dependent fields.
+ */
 void
 pmap_page_init(vm_page_t m)
 {
-	/* XXX: TODO - pv_lists */
-
+	pmap_pv_vm_page_init(m);
 }
 
 /* 
@@ -601,15 +612,34 @@ pmap_growkernel(uintptr_t addr)
 	mmu_map_t_fini(tptr);
 }
 
+/*
+ *	Initialize the pmap module.
+ *	Called by vm_init, to initialize any structures that the pmap
+ *	system needs to map virtual memory.
+ */
 void
 pmap_init(void)
 {
 	uintptr_t va;
 
 	/* XXX: review the use of gdtset for the purpose below */
-	gdtset = 1; /* xpq may assert for locking sanity from this point onwards */
 
-	/* XXX: switch the mmu_map.c backend to something more sane */
+	/*
+	 * At this point we initialise the pv mappings of all PAs that
+	 * have been mapped into the kernel VA by pmap_bootstrap()
+	 */
+
+	vm_paddr_t pa;
+
+	for (pa = phys_avail[0]; pa < VTOP(virtual_avail); pa += PAGE_SIZE) {
+		vm_page_t m;
+		m = PHYS_TO_VM_PAGE(pa);
+		if (m == NULL)
+			continue;
+		pmap_put_pv_entry(kernel_pmap, PTOV(pa), m);
+	}
+
+	gdtset = 1; /* xpq may assert for locking sanity from this point onwards */
 
 	/* Get a va for console and map the console mfn into it */
 	vm_paddr_t console_ma = xen_start_info->console.domU.mfn << PAGE_SHIFT;
@@ -629,7 +659,7 @@ pmap_pinit0(pmap_t pmap)
 	pmap->pm_root = NULL;
 	CPU_ZERO(&pmap->pm_active);
 	PCPU_SET(curpmap, pmap);
-	TAILQ_INIT(&pmap->pm_pvchunk);
+	pmap_pv_pmap_init(pmap);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 }
 
@@ -665,7 +695,7 @@ pmap_pinit(pmap_t pmap)
 
 	pmap->pm_root = NULL;
 	CPU_ZERO(&pmap->pm_active);
-	TAILQ_INIT(&pmap->pm_pvchunk);
+	pmap_pv_pmap_init(pmap);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 
 	return 1;
@@ -806,6 +836,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
 	    VM_OBJECT_LOCKED(m->object),
 	    ("pmap_enter: page %p is not busy", m));
 
+	KASSERT(pmap == kernel_pmap, ("XXX: TODO: Userland pmap\n"));
+	KASSERT(VM_PAGE_TO_PHYS(m) != 0,
+		("VM_PAGE_TO_PHYS(m) == 0x%lx\n", VM_PAGE_TO_PHYS(m)));
+
 	pmap_kenter(va, VM_PAGE_TO_PHYS(m)); /* Shim to keep bootup
 					      * happy for now */
 
@@ -922,6 +956,18 @@ nomapping:
 void 
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
+
+	vm_page_t m;
+
+	m = PHYS_TO_VM_PAGE(pa);
+
+	if (gdtset == 1 && m != NULL) {
+		/*
+		 * Enter on the PV list if part of our managed memory.
+		 */
+		pmap_put_pv_entry(kernel_pmap, va, m);
+	}
 	pmap_kenter_ma(va, xpmap_ptom(pa));
 }
 
@@ -1220,6 +1266,39 @@ pmap_change_attr(vm_offset_t va, vm_size
 }
 
 static uintptr_t
+xen_vm_ptov(vm_paddr_t pa)
+{
+	vm_page_t m;
+
+	m = PHYS_TO_VM_PAGE(pa);
+
+	/* Assert for valid PA *after* the VM has been init-ed */
+	KASSERT((gdtset == 1 && m != NULL) || pa < physfree,
+		("Stray PA 0x%lx passed\n", pa));
+
+	if (m == NULL) { /* Early boottime page - obeys early mapping rules */
+		return PTOV(pa);
+	}
+
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+		("%s: page %p is not managed", __func__, m));
+
+	return pmap_pv_vm_page_to_v(kernel_pmap, m);
+}
+
+static vm_paddr_t
+xen_vm_vtop(uintptr_t va)
+{
+	vm_page_t m;
+	KASSERT((va >= VM_MIN_KERNEL_ADDRESS &&
+		 va <= VM_MAX_KERNEL_ADDRESS),
+		("Invalid kernel virtual address"));
+
+	m = vm_page_lookup(kernel_object, atop(va - VM_MIN_KERNEL_ADDRESS));
+	KASSERT(m != NULL, ("%s: no page backing va 0x%lx", __func__, va));
+
+	return VM_PAGE_TO_PHYS(m);
+}
+
+static uintptr_t
 xen_pagezone_alloc(void)
 {
 	uintptr_t ret;
@@ -1270,9 +1349,11 @@ setup_xen_pagezone(void *dummy __unused)
 {
 
 	xen_pagezone = uma_zcreate("XEN PAGEZONE", PAGE_SIZE, NULL, NULL,
-	     xen_pagezone_init, xen_pagezone_fini, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+	     xen_pagezone_init, xen_pagezone_fini, UMA_ALIGN_PTR, 0);
 	ptmb_mappedalloc = xen_pagezone_alloc;
 	ptmb_mappedfree = xen_pagezone_free;
+	ptmb_vtop = xen_vm_vtop;
+	ptmb_ptov = xen_vm_ptov;
 }
 SYSINIT(setup_xen_pagezone, SI_SUB_VM_CONF, SI_ORDER_ANY, setup_xen_pagezone,
     NULL);
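
Note the asymmetry in the post-boot backends above: xen_vm_ptov() walks the
kernel_pmap pv list of the vm_page, while xen_vm_vtop() looks the page up in
kernel_object by its offset from VM_MIN_KERNEL_ADDRESS.  A minimal sketch of
the round-trip contract the pair is meant to uphold for managed pages entered
via pmap_kenter() (an illustration, not committed code):

	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);	/* m: a managed page mapped
						 * through pmap_kenter() */
	KASSERT(ptmb_vtop(ptmb_ptov(pa)) == pa,
	    ("ptov/vtop round trip broke for pa 0x%lx", pa));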

Added: projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.c	Tue Nov 27 21:45:42 2012	(r243633)
@@ -0,0 +1,328 @@
+/*-
+ *
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ * Copyright (c) 2012 Spectra Logic Corporation
+ * All rights reserved.
+ * 
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Portions of this software were developed by
+ * Cherry G. Mathew <cherry.g.mathew@gmail.com> under sponsorship
+ * from Spectra Logic Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * This file contains the amd64 physical->virtual mapping management code.
+ * This code previously lived in pmap.c and has been split out to make
+ * things more modular.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+#include <sys/mutex.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include <machine/md_var.h>
+
+#include <amd64/xen/pmap_pv.h>
+
+#ifdef PV_STATS
+#define PV_STAT(x)	do { x ; } while (0)
+#else
+#define PV_STAT(x)	do { } while (0)
+#endif
+
+/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+	struct rwlock	lock;
+	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define	pvh_global_lock	pvh_global.lock
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+#define	NPV_LIST_LOCKS	MAXCPU
+#define	PHYS_TO_PV_LIST_LOCK(pa)	\
+			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
+			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
+
+static struct mtx pv_chunks_mutex;
+static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 3);
+CTASSERT(_NPCPV == 168);
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+#define	PC_FREE0	0xfffffffffffffffful
+#define	PC_FREE1	0xfffffffffffffffful
+#define	PC_FREE2	0x000000fffffffffful
+
+/*
+ * Returns a new PV entry, allocating a new PV chunk from the system when
+ * needed.  Reclaiming chunks under memory pressure is not implemented
+ * yet; if the chunk allocation fails, we panic (see the XXX below).
+ */
+pv_entry_t
+pmap_get_pv_entry(pmap_t pmap)
+{
+	int bit, field;
+	pv_entry_t pv;
+	struct pv_chunk *pc;
+	vm_page_t m;
+
+	rw_assert(&pvh_global_lock, RA_LOCKED);
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
+	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+	if (pc != NULL) {
+		for (field = 0; field < _NPCM; field++) {
+			if (pc->pc_map[field]) {
+				bit = bsfq(pc->pc_map[field]);
+				break;
+			}
+		}
+		if (field < _NPCM) {
+			pv = &pc->pc_pventry[field * 64 + bit];
+			pc->pc_map[field] &= ~(1ul << bit);
+			/* If this was the last item, move it to tail */
+			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
+			    pc->pc_map[2] == 0) {
+				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
+				    pc_list);
+			}
+			PV_STAT(atomic_add_long(&pv_entry_count, 1));
+			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
+			return (pv);
+		}
+	}
+
+	/* No free items, allocate another chunk */
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+	    VM_ALLOC_WIRED);
+	if (m == NULL) {
+		panic("XXX: TODO: memory pressure reclaim\n");
+	}
+
+	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+	dump_add_page(m->phys_addr);
+
+	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+
+	/* 
+	 * DMAP entries are kernel only, and don't need tracking, so
+	 * we just wire in the va.
+	 */
+	pmap_kenter_ma((vm_offset_t)pc, xpmap_ptom(m->phys_addr));
+
+	pc->pc_pmap = pmap;
+	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
+	pc->pc_map[1] = PC_FREE1;
+	pc->pc_map[2] = PC_FREE2;
+
+	mtx_lock(&pv_chunks_mutex);
+	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+	mtx_unlock(&pv_chunks_mutex);
+	pv = &pc->pc_pventry[0];
+	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+	PV_STAT(atomic_add_long(&pv_entry_count, 1));
+	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
+	return (pv);
+}
+
+void
+pmap_put_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+	vm_paddr_t pa;
+	pv_entry_t pv;
+
+	KASSERT(m != NULL, ("Invalid page"));
+	pa = VM_PAGE_TO_PHYS(m);
+
+//	if ((m->oflags & VPO_UNMANAGED) == 0) { /* XXX: exclude unmanaged */
+
+		PMAP_LOCK(pmap);
+		rw_rlock(&pvh_global_lock);
+		pv = pmap_get_pv_entry(pmap);
+		pv->pv_va = va;
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		rw_runlock(&pvh_global_lock);
+		PMAP_UNLOCK(pmap);
+//	}
+}
+
+/* This function may be called after pmap_pv_pmap_init() */
+void
+pmap_pv_init(void)
+{
+	int i;
+
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pmap pv global");
+
+	/*
+	 * Initialize the pv chunk list mutex.
+	 */
+	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
+
+	/*
+	 * Initialize the pool of pv list locks.
+	 */
+	for (i = 0; i < NPV_LIST_LOCKS; i++)
+		rw_init(&pv_list_locks[i], "pmap pv list");
+}
+
+/* Initialize per-pmap pv data.  OK to call before pmap_pv_init(). */
+void
+pmap_pv_pmap_init(pmap_t pmap)
+{
+	TAILQ_INIT(&pmap->pm_pvchunk);
+}
+
+void
+pmap_pv_vm_page_init(vm_page_t m)
+{
+	TAILQ_INIT(&m->md.pv_list);
+}
+
+/*
+ * Return the va mapping of a vm_page in the given pmap.
+ * Returns VM_MAX_KERNEL_ADDRESS + 1 if no mapping exists.
+ */
+vm_offset_t
+pmap_pv_vm_page_to_v(pmap_t pmap, vm_page_t m)
+{
+	pv_entry_t pv;
+
+	rw_rlock(&pvh_global_lock);
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+		if (PV_PMAP(pv) == pmap) { /* We return the first hit */
+			rw_runlock(&pvh_global_lock);
+			return pv->pv_va;
+		}
+	}
+
+	rw_runlock(&pvh_global_lock);
+	return VM_MAX_KERNEL_ADDRESS + 1;
+}
+
+/*
+ * Query if a given vm_page is mapped in the pmap
+ */
+bool
+pmap_pv_vm_page_mapped(pmap_t pmap, vm_page_t m)
+{
+	return (pmap_pv_vm_page_to_v(pmap, m) !=
+		(VM_MAX_KERNEL_ADDRESS + 1));
+}
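
For reference, the chunk arithmetic asserted above works out as follows: a
pv_chunk fills one 4K page and carries _NPCPV == 168 pv entries, tracked by
_NPCM == 3 64-bit free bitmaps (64 + 64 + 40 bits, hence PC_FREE2 keeps only
the low 40 bits set), and a free entry is located with bsfq() and indexed as
pc_pventry[field * 64 + bit].  A standalone userland sketch of the same
bitmap walk (illustrative values; __builtin_ctzll() stands in for bsfq()):

	#include <stdint.h>
	#include <stdio.h>

	#define _NPCM	3

	int
	main(void)
	{
		/* Free bitmaps of a fresh chunk; bit 0 is pre-allocated. */
		uint64_t pc_map[_NPCM] = {
			0xfffffffffffffffful & ~1ul,	/* PC_FREE0 */
			0xfffffffffffffffful,		/* PC_FREE1 */
			0x000000fffffffffful,		/* PC_FREE2: 40 bits */
		};
		int field, bit;

		for (field = 0; field < _NPCM; field++) {
			if (pc_map[field] != 0) {
				/* __builtin_ctzll() == bsfq() here. */
				bit = __builtin_ctzll(pc_map[field]);
				printf("next free pv entry: %d\n",
				    field * 64 + bit);	/* prints 1 */
				return (0);
			}
		}
		return (1);
	}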

Added: projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap_pv.h	Tue Nov 27 21:45:42 2012	(r243633)
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2011-2012 Spectra Logic Corporation
+ * All rights reserved.
+ *
+ * This software was developed by Cherry G. Mathew <cherry@FreeBSD.org>
+ * under sponsorship from Spectra Logic Corporation.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_PMAP_PV_H_
+#define	_MACHINE_PMAP_PV_H_
+
+void pmap_pv_init(void);
+void pmap_pv_pmap_init(pmap_t pmap);
+void pmap_pv_vm_page_init(vm_page_t m);
+vm_offset_t pmap_pv_vm_page_to_v(pmap_t pmap, vm_page_t m);
+bool pmap_pv_vm_page_mapped(pmap_t pmap, vm_page_t m);
+pv_entry_t pmap_get_pv_entry(pmap_t pmap);
+void pmap_put_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
+
+#endif /* !_MACHINE_PMAP_PV_H_ */
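
Taken together, a caller drives this interface roughly as pmap.c does above
(a condensed sketch of the call sequence, with locking and error handling
omitted):

	pmap_pv_init();			/* once, from pmap_bootstrap() */
	pmap_pv_pmap_init(kernel_pmap);	/* per pmap, replaces TAILQ_INIT */
	pmap_pv_vm_page_init(m);	/* per page, from pmap_page_init() */

	/* Record a P->V mapping, then look it back up. */
	pmap_put_pv_entry(kernel_pmap, va, m);
	KASSERT(pmap_pv_vm_page_to_v(kernel_pmap, m) == va,
	    ("pv lookup mismatch"));
	KASSERT(pmap_pv_vm_page_mapped(kernel_pmap, m), ("page not mapped"));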

Modified: projects/amd64_xen_pv/sys/conf/files.amd64
==============================================================================
--- projects/amd64_xen_pv/sys/conf/files.amd64	Tue Nov 27 21:27:12 2012	(r243632)
+++ projects/amd64_xen_pv/sys/conf/files.amd64	Tue Nov 27 21:45:42 2012	(r243633)
@@ -126,6 +126,7 @@ amd64/amd64/mpboot.S		optional	native sm
 amd64/xen/mpboot.c		optional	xen smp
 amd64/amd64/pmap.c		optional	native
 amd64/xen/pmap.c		optional	xen
+amd64/xen/pmap_pv.c		optional	xen
 amd64/xen/mmu_map.c		optional	xen
 amd64/amd64/prof_machdep.c	optional	profiling-routine
 amd64/amd64/ptrace_machdep.c	standard


