Date:      Fri, 29 Jun 2012 15:47:04 +0000 (UTC)
From:      Attilio Rao <attilio@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r237793 - projects/amd64_xen_pv/sys/amd64/xen
Message-ID:  <201206291547.q5TFl4bL054546@svn.freebsd.org>

Author: attilio
Date: Fri Jun 29 15:47:03 2012
New Revision: 237793
URL: http://svn.freebsd.org/changeset/base/237793

Log:
  - Remove the possibility to pass an arbitrary size to the
    mmu_map_mbackend alloc callback.
    It now allocates a single page-sized chunk of memory.
  - Fix the prototype of ptmb_mappedfree to take a vm_offset_t,
    matching the expected callback prototype.
  
  Approved by:	cherry
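
  In short, the mmu_map_mbackend alloc callback loses its size argument;
  backends are now expected to hand back exactly one page per call.  A
  condensed sketch of the before/after (illustrative only; the remaining
  struct members are elided here):

	struct mmu_map_mbackend {	/* Callbacks */
		/* old: vm_offset_t (*alloc)(size_t);  callers passed PAGE_SIZE */
		vm_offset_t (*alloc)(void);	/* new: returns one page per call */
		void (*free)(vm_offset_t);	/* May be NULL */
		/* ... remaining members elided ... */
	};

	/* Call sites in mmu_map_hold_va() drop the size argument, e.g.: */
	/* old: pti->pdpt = (pdp_entry_t *)pti->ptmb.alloc(PAGE_SIZE); */
	/* new: pti->pdpt = (pdp_entry_t *)pti->ptmb.alloc();          */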

Modified:
  projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c
  projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h
  projects/amd64_xen_pv/sys/amd64/xen/pmap.c

Modified: projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c	Fri Jun 29 15:30:14 2012	(r237792)
+++ projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c	Fri Jun 29 15:47:03 2012	(r237793)
@@ -328,7 +328,7 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		vm_paddr_t pml4tep_ma;
 		pml4_entry_t pml4te;
 
-		pti->pdpt = (pdp_entry_t *)pti->ptmb.alloc(PAGE_SIZE);
+		pti->pdpt = (pdp_entry_t *)pti->ptmb.alloc();
 
 		pml4tep = &pti->pml4t[pml4t_index(va)];
 		pml4tep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pml4tep));
@@ -346,7 +346,7 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		vm_paddr_t pdptep_ma;
 		pdp_entry_t pdpte;
 
-		pti->pdt = (pd_entry_t *)pti->ptmb.alloc(PAGE_SIZE);
+		pti->pdt = (pd_entry_t *)pti->ptmb.alloc();
 
 		pdptep = &pti->pdpt[pdpt_index(va)];
 		pdptep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pdptep));
@@ -364,7 +364,7 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		vm_paddr_t pdtep_ma;
 		pd_entry_t pdte;
 
-		pti->pt = (pt_entry_t *) pti->ptmb.alloc(PAGE_SIZE);
+		pti->pt = (pt_entry_t *) pti->ptmb.alloc();
 
 		pdtep = &pti->pdt[pdt_index(va)];
 		pdtep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pdtep));

Modified: projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h	Fri Jun 29 15:30:14 2012	(r237792)
+++ projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h	Fri Jun 29 15:47:03 2012	(r237793)
@@ -90,7 +90,7 @@ typedef void * mmu_map_t;
 
 struct mmu_map_mbackend { /* Callbacks */
 
-	vm_offset_t (*alloc)(size_t);
+	vm_offset_t (*alloc)(void);
 	void (*free)(vm_offset_t); /* May be NULL */
 
 	/* 
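
On the provider side, anything plugged into struct mmu_map_mbackend now
has to export a no-argument, page-granular allocator.  A minimal sketch
of a conforming backend, assuming mmu_map.h is included and using the
invented names my_page_alloc()/my_page_free() plus a hypothetical helper
pair my_get_free_page()/my_put_free_page() (the real backends in this
tree are mmu_alloc() and xen_pagezone_alloc() in the pmap.c diff below):

	/* Hypothetical backend matching the new prototypes (sketch only). */
	static vm_offset_t
	my_page_alloc(void)
	{
		/* Must return the VA of exactly one mapped page. */
		return (my_get_free_page());	/* invented helper */
	}

	static void
	my_page_free(vm_offset_t va)
	{
		my_put_free_page(va);		/* invented helper */
	}

	/* Wiring it up; the address translation callbacks are elided. */
	struct mmu_map_mbackend mb = {
		.alloc = my_page_alloc,
		.free  = my_page_free,
	};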

Modified: projects/amd64_xen_pv/sys/amd64/xen/pmap.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Fri Jun 29 15:30:14 2012	(r237792)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Fri Jun 29 15:47:03 2012	(r237793)
@@ -185,8 +185,8 @@ static vm_paddr_t	boot_ptendphys;	/* phy
 
 static uma_zone_t xen_pagezone;
 static size_t tsz; /* mmu_map.h opaque cookie size */
-static vm_offset_t (*ptmb_mappedalloc)(size_t) = NULL;
-static void (*ptmb_mappedfree)(size_t) = NULL;
+static vm_offset_t (*ptmb_mappedalloc)(void) = NULL;
+static void (*ptmb_mappedfree)(vm_offset_t) = NULL;
 static vm_offset_t ptmb_ptov(vm_paddr_t p)
 {
 	return PTOV(p);
@@ -461,21 +461,18 @@ pmap_xen_bootpages(vm_paddr_t *firstaddr
 
 /* alloc from linear mapped boot time virtual address space */
 static vm_offset_t
-mmu_alloc(size_t size)
+mmu_alloc(void)
 {
-	KASSERT(size != 0, ("mmu_alloc size must not be zero\n"));
 	KASSERT(physfree != 0,
 		("physfree must have been set before using mmu_alloc"));
 				
-	size = round_page(size); /* We can allocate only in page sizes */
-
-	vm_offset_t va = vallocpages(&physfree, atop(size));
+	vm_offset_t va = vallocpages(&physfree, atop(PAGE_SIZE));
 
 	/* 
 	 * Xen requires the page table hierarchy to be R/O.
 	 */
 
-	pmap_xen_setpages_ro(va, atop(size));
+	pmap_xen_setpages_ro(va, atop(PAGE_SIZE));
 
 	return va;
 }
@@ -1167,12 +1164,10 @@ pmap_change_attr(vm_offset_t va, vm_size
 }
 
 static vm_offset_t
-xen_pagezone_alloc(size_t size)
+xen_pagezone_alloc(void)
 {
 	vm_offset_t ret;
 
-	KASSERT(size == PAGE_SIZE, ("%s: invalid size", __func__));
-
 	ret = (vm_offset_t)uma_zalloc(xen_pagezone, M_NOWAIT | M_ZERO);
 	if (ret == 0)
 		panic("%s: failed allocation\n", __func__);
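
The net effect in pmap.c is that both backends are hard-wired to page
granularity: mmu_alloc() no longer rounds a caller-supplied size and
xen_pagezone_alloc() no longer asserts on one, so the size-related
KASSERTs go away.  With the usual page macros, atop(PAGE_SIZE) is just
1, so the boot-time path reduces to the equivalent of:

	/* atop(PAGE_SIZE) == (PAGE_SIZE >> PAGE_SHIFT) == 1, so the new
	 * mmu_alloc() body boils down to: */
	vm_offset_t va = vallocpages(&physfree, 1);
	pmap_xen_setpages_ro(va, 1);	/* Xen wants the page tables R/O */
	return va;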


