Date:      Thu, 4 Oct 2012 02:27:14 +0000 (UTC)
From:      Neel Natu <neel@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-projects@freebsd.org
Subject:   svn commit: r241178 - in projects/bhyve: lib/libvmmapi sys/amd64/include sys/amd64/vmm sys/amd64/vmm/io
Message-ID:  <201210040227.q942REIZ040769@svn.freebsd.org>

Author: neel
Date: Thu Oct  4 02:27:14 2012
New Revision: 241178
URL: http://svn.freebsd.org/changeset/base/241178

Log:
  Change vm_malloc() to map pages in the guest physical address space in 4KB
  chunks. This breaks the assumption that the entire memory segment is
  contiguously allocated in the host physical address space.
  
  This also paves the way to satisfying 4KB page allocations by requesting
  free pages from the VM subsystem, as opposed to hard-partitioning host
  memory at boot time.
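
A minimal userland sketch of the page-at-a-time pattern described above, assuming
hypothetical fake_* helpers in place of the real vmm_mem_alloc()/VMMMAP_SET()
kernel interfaces; it only models the idea that, after this change, consecutive
guest-physical pages need not be backed by contiguous host memory and that
translation therefore happens per 4KB page:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	FAKE_PAGE_SIZE	4096
#define	FAKE_MAX_PAGES	64

/*
 * Hypothetical stand-in for a memory segment: one backing "host" page per
 * 4KB of guest-physical space, so the backing need not be contiguous.
 */
struct fake_segment {
	uint64_t	gpa;			/* guest-physical base */
	size_t		len;			/* bytes mapped so far */
	void		*hpage[FAKE_MAX_PAGES];	/* one allocation per page */
};

static int
fake_vm_malloc(struct fake_segment *seg, uint64_t gpa, size_t len)
{
	size_t i;

	assert((gpa & (FAKE_PAGE_SIZE - 1)) == 0);
	assert((len & (FAKE_PAGE_SIZE - 1)) == 0 &&
	    len / FAKE_PAGE_SIZE <= FAKE_MAX_PAGES);

	seg->gpa = gpa;
	seg->len = 0;
	while (seg->len < len) {
		i = seg->len / FAKE_PAGE_SIZE;
		seg->hpage[i] = malloc(FAKE_PAGE_SIZE);
		if (seg->hpage[i] == NULL) {
			/* Unwind the pages mapped so far. */
			while (i-- > 0)
				free(seg->hpage[i]);
			seg->len = 0;
			return (-1);
		}
		seg->len += FAKE_PAGE_SIZE;
	}
	return (0);
}

/* Translation is now per 4KB page; there is no single segment base address. */
static void *
fake_gpa2hva(struct fake_segment *seg, uint64_t gpa)
{
	if (gpa < seg->gpa || gpa >= seg->gpa + seg->len)
		return (NULL);
	return ((char *)seg->hpage[(gpa - seg->gpa) / FAKE_PAGE_SIZE] +
	    ((gpa - seg->gpa) & (FAKE_PAGE_SIZE - 1)));
}

int
main(void)
{
	struct fake_segment seg;

	if (fake_vm_malloc(&seg, 0x100000, 4 * FAKE_PAGE_SIZE) != 0)
		return (1);
	printf("guest page 0 backed at %p, guest page 1 at %p\n",
	    fake_gpa2hva(&seg, 0x100000), fake_gpa2hva(&seg, 0x101000));
	return (0);
}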

Modified:
  projects/bhyve/lib/libvmmapi/vmmapi.c
  projects/bhyve/sys/amd64/include/vmm_dev.h
  projects/bhyve/sys/amd64/vmm/io/ppt.c
  projects/bhyve/sys/amd64/vmm/vmm.c
  projects/bhyve/sys/amd64/vmm/vmm_dev.c
  projects/bhyve/sys/amd64/vmm/vmm_mem.c

Modified: projects/bhyve/lib/libvmmapi/vmmapi.c
==============================================================================
--- projects/bhyve/lib/libvmmapi/vmmapi.c	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/lib/libvmmapi/vmmapi.c	Thu Oct  4 02:27:14 2012	(r241178)
@@ -111,9 +111,10 @@ vm_destroy(struct vmctx *vm)
 {
 	assert(vm != NULL);
 
-	DESTROY(vm->name);
 	if (vm->fd >= 0)
 		close(vm->fd);
+	DESTROY(vm->name);
+
 	free(vm);
 }
 
@@ -151,7 +152,6 @@ vm_get_memory_seg(struct vmctx *ctx, vm_
 	bzero(&seg, sizeof(seg));
 	seg.gpa = gpa;
 	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
-	*ret_hpa = seg.hpa;
 	*ret_len = seg.len;
 	return (error);
 }

Modified: projects/bhyve/sys/amd64/include/vmm_dev.h
==============================================================================
--- projects/bhyve/sys/amd64/include/vmm_dev.h	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/sys/amd64/include/vmm_dev.h	Thu Oct  4 02:27:14 2012	(r241178)
@@ -35,7 +35,6 @@ void	vmmdev_cleanup(void);
 #endif
 
 struct vm_memory_segment {
-	vm_paddr_t	hpa;	/* out */
 	vm_paddr_t	gpa;	/* in */
 	size_t		len;	/* in */
 };

Modified: projects/bhyve/sys/amd64/vmm/io/ppt.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/io/ppt.c	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/sys/amd64/vmm/io/ppt.c	Thu Oct  4 02:27:14 2012	(r241178)
@@ -356,7 +356,6 @@ ppt_map_mmio(struct vm *vm, int bus, int
 				if (error == 0) {
 					seg->gpa = gpa;
 					seg->len = len;
-					seg->hpa = hpa;
 				}
 				return (error);
 			}

Modified: projects/bhyve/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm.c	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/sys/amd64/vmm/vmm.c	Thu Oct  4 02:27:14 2012	(r241178)
@@ -275,6 +275,28 @@ vm_create(const char *name)
 	return (vm);
 }
 
+static void
+vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
+{
+	size_t len;
+	vm_paddr_t hpa;
+
+	len = 0;
+	while (len < seg->len) {
+		hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
+		if (hpa == (vm_paddr_t)-1) {
+			panic("vm_free_mem_segs: cannot free hpa "
+			      "associated with gpa 0x%016lx", seg->gpa + len);
+		}
+
+		vmm_mem_free(hpa, PAGE_SIZE);
+
+		len += PAGE_SIZE;
+	}
+
+	bzero(seg, sizeof(struct vm_memory_segment));
+}
+
 void
 vm_destroy(struct vm *vm)
 {
@@ -283,7 +305,9 @@ vm_destroy(struct vm *vm)
 	ppt_unassign_all(vm);
 
 	for (i = 0; i < vm->num_mem_segs; i++)
-		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);
+		vm_free_mem_seg(vm, &vm->mem_segs[i]);
+
+	vm->num_mem_segs = 0;
 
 	for (i = 0; i < VM_MAXCPU; i++)
 		vcpu_cleanup(&vm->vcpu[i]);
@@ -345,6 +369,7 @@ int
 vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
 {
 	int error, available, allocated;
+	struct vm_memory_segment *seg;
 	vm_paddr_t g, hpa;
 
 	const boolean_t spok = TRUE;	/* superpage mappings are ok */
@@ -380,22 +405,32 @@ vm_malloc(struct vm *vm, vm_paddr_t gpa,
 	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
 		return (E2BIG);
 
-	hpa = vmm_mem_alloc(len);
-	if (hpa == 0)
-		return (ENOMEM);
-
-	error = VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
-			   VM_PROT_ALL, spok);
-	if (error) {
-		vmm_mem_free(hpa, len);
-		return (error);
+	seg = &vm->mem_segs[vm->num_mem_segs];
+
+	seg->gpa = gpa;
+	seg->len = 0;
+	while (seg->len < len) {
+		hpa = vmm_mem_alloc(PAGE_SIZE);
+		if (hpa == 0) {
+			error = ENOMEM;
+			break;
+		}
+
+		error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
+				   VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
+		if (error)
+			break;
+
+		iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);
+
+		seg->len += PAGE_SIZE;
 	}
 
-	iommu_create_mapping(vm->iommu, gpa, hpa, len);
+	if (seg->len != len) {
+		vm_free_mem_seg(vm, seg);
+		return (error);
+	}
 
-	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
-	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
-	vm->mem_segs[vm->num_mem_segs].len = len;
 	vm->num_mem_segs++;
 
 	return (0);

Modified: projects/bhyve/sys/amd64/vmm/vmm_dev.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm_dev.c	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/sys/amd64/vmm/vmm_dev.c	Thu Oct  4 02:27:14 2012	(r241178)
@@ -299,7 +299,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long c
 		break;
 	case VM_GET_MEMORY_SEG:
 		seg = (struct vm_memory_segment *)data;
-		seg->hpa = seg->len = 0;
+		seg->len = 0;
 		(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
 		error = 0;
 		break;

Modified: projects/bhyve/sys/amd64/vmm/vmm_mem.c
==============================================================================
--- projects/bhyve/sys/amd64/vmm/vmm_mem.c	Thu Oct  4 01:37:12 2012	(r241177)
+++ projects/bhyve/sys/amd64/vmm/vmm_mem.c	Thu Oct  4 02:27:14 2012	(r241178)
@@ -318,9 +318,9 @@ vmm_mem_alloc(size_t size)
 	int i;
 	vm_paddr_t addr;
 
-	if ((size & PDRMASK) != 0) {
+	if ((size & PAGE_MASK) != 0) {
 		panic("vmm_mem_alloc: size 0x%0lx must be "
-		      "aligned on a 0x%0x boundary\n", size, NBPDR);
+		      "aligned on a 0x%0x boundary\n", size, PAGE_SIZE);
 	}
 
 	addr = 0;
@@ -373,9 +373,9 @@ vmm_mem_free(vm_paddr_t base, size_t len
 {
 	int i;
 
-	if ((base & PDRMASK) != 0 || (length & PDRMASK) != 0) {
+	if ((base & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) {
 		panic("vmm_mem_free: base 0x%0lx and length 0x%0lx must be "
-		      "aligned on a 0x%0x boundary\n", base, length, NBPDR);
+		      "aligned on a 0x%0x boundary\n", base, length, PAGE_SIZE);
 	}
 
 	mtx_lock(&vmm_mem_mtx);


