Date:      Thu, 28 Nov 2002 14:43:17 -0800 (PST)
From:      Marcel Moolenaar <marcel@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 21655 for review
Message-ID:  <200211282243.gASMhHv5006003@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=21655

Change 21655 by marcel@marcel_nfs on 2002/11/28 14:42:35

	Better handle sparse physical memory: Don't let the size of
	vm_page_array[] depend on the difference between the last and
	the first physical address in phys_avail[]. Instead, use the
	previously determined total memory size. Now that a dense
	virtual address space is mapped onto a sparse physical address
	space, we cannot use a one-to-one mapping to get from the
	physical address to the address of the vm_page_t in the
	vm_page_array. Therefore, introduce vm_page_from_phys, which,
	given a physical address, walks phys_avail[] to determine the
	position of the corresponding vm_page_t in the array.
	
	It is assumed for now that performance is of lesser concern
	when mapping physical addresses to virtual addresses. However,
	to avoid pessimizing architectures whose physical addresses
	aren't sparse (or are dense enough for it not to matter), it's
	probably best to move this to MD code. That would also address
	the assumptions that the largest chunk is the second one (i.e.,
	typically i386-oriented) and that it's generally big enough to
	hold all VM structures.
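
For illustration, a minimal standalone sketch of the dense-index
computation this change introduces. It is not the committed code: the
chunk boundaries, the 8K page size and dense_index() are invented
stand-ins for the kernel's phys_avail[], PAGE_SIZE/atop() and
vm_page_from_phys():

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	13		/* invented: 8K pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define atop(x)		((x) >> PAGE_SHIFT)

	/* Two sparse chunks, [0x2000,0x6000) and [0x100000,0x104000),
	 * terminated by a zero entry, mimicking phys_avail[]. */
	static uint64_t phys_avail[] = {
		0x2000, 0x6000, 0x100000, 0x104000, 0, 0
	};

	/* Dense array index of the page containing pa: all pages of
	 * the chunks before pa's chunk, plus pa's offset (in pages)
	 * within its own chunk. */
	static long
	dense_index(uint64_t pa)
	{
		long idx = 0;
		int i;

		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
				return (idx + atop(pa) - atop(phys_avail[i]));
			idx += atop(phys_avail[i + 1] - phys_avail[i]);
		}
		return (-1);		/* unmanaged address */
	}

	int
	main(void)
	{
		/* 0x102000 is one page into the second chunk; the first
		 * chunk holds 2 pages, so the dense index is 3. */
		printf("%ld\n", dense_index(0x102000));
		return (0);
	}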

Affected files ...

.. //depot/projects/ia64/sys/vm/vm_page.c#26 edit
.. //depot/projects/ia64/sys/vm/vm_page.h#18 edit

Differences ...

==== //depot/projects/ia64/sys/vm/vm_page.c#26 (text+ko) ====

@@ -151,6 +151,27 @@
 }
 
 /*
+ *	vm_page_from_phys
+ *
+ *	Given a physical address, return a pointer to the vm_page in the
+ *	vm_page array. This function deals with sparse memory addresses.
+ */
+vm_page_t
+vm_page_from_phys(vm_offset_t pa)
+{
+	vm_page_t p = vm_page_array;
+	int i;
+
+	for (i = 0; phys_avail[i + 1]; i += 2) {
+		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
+			return (p + (atop(pa) - phys_avail[i] / PAGE_SIZE));
+		p += (phys_avail[i + 1] - phys_avail[i]) / PAGE_SIZE;
+	}
+	panic("vm_page_from_phys: unmanaged physical address.");
+	return (0);
+}
+
+/*
  *	vm_page_startup:
  *
  *	Initializes the resident memory module.
@@ -188,6 +209,7 @@
 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
 	}
 
+	/* XXX: Based on assumptions that aren't valid on all architectures. */
 	for (i = 0; phys_avail[i + 1]; i += 2) {
 		vm_size_t size = phys_avail[i + 1] - phys_avail[i];
 
@@ -221,8 +243,8 @@
 	bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
 	new_end = end - bootpages;
 	new_end = trunc_page(new_end);
-	mapped = pmap_map(&vaddr, new_end, end,
-	    VM_PROT_READ | VM_PROT_WRITE);
+	/* XXX: Bounds check! */
+	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
 	bzero((caddr_t) mapped, end - new_end);
 	uma_startup((caddr_t)mapped);
 
@@ -232,7 +254,12 @@
 	 * page).
 	 */
 	first_page = phys_avail[0] / PAGE_SIZE;
-	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
+	/*
+	 * Don't use the physical address range for the actual page range.
+	 * On architectures with sparse memory addressing this is wrong.
+	 * Instead, use the total memory obtained above.
+	 */
+	page_range = total / PAGE_SIZE;
 	npages = (total - (page_range * sizeof(struct vm_page)) -
 	    (end - new_end)) / PAGE_SIZE;
 	end = new_end;
@@ -242,8 +269,8 @@
 	 * queue.
 	 */
 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
-	mapped = pmap_map(&vaddr, new_end, end,
-	    VM_PROT_READ | VM_PROT_WRITE);
+	/* XXX: Bounds check! */
+	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
 	vm_page_array = (vm_page_t) mapped;
 
 	/*

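To make the new sizing concrete, a rough standalone example; every
number here is invented (8K pages, 64MB of managed memory, a 128-byte
struct vm_page) and the bootpages term is ignored:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t page_size = 8192;	/* invented: 8K pages */
		uint64_t total = 64ULL << 20;	/* invented: 64MB managed */
		uint64_t vm_page_sz = 128;	/* invented struct size */

		/* New computation: page_range is derived from the total
		 * memory size, not from the physical address span. */
		uint64_t page_range = total / page_size;	/* 8192 */
		uint64_t array_bytes = page_range * vm_page_sz;	/* 1MB */
		uint64_t npages = (total - array_bytes) / page_size;

		printf("page_range=%llu array=%lluKB npages=%llu\n",
		    (unsigned long long)page_range,
		    (unsigned long long)(array_bytes >> 10),
		    (unsigned long long)npages);
		return (0);
	}
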
==== //depot/projects/ia64/sys/vm/vm_page.h#18 (text+ko) ====

@@ -293,8 +293,7 @@
 
 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
 
-#define PHYS_TO_VM_PAGE(pa) \
-		(&vm_page_array[atop(pa) - first_page ])
+#define PHYS_TO_VM_PAGE(pa)	vm_page_from_phys(pa)
 
 extern struct mtx vm_page_queue_mtx;
 #define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
@@ -356,6 +355,7 @@
 void vm_page_remove (vm_page_t);
 void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
+vm_page_t vm_page_from_phys(vm_offset_t);
 vm_offset_t vm_page_startup (vm_offset_t, vm_offset_t, vm_offset_t);
 void vm_page_unmanage (vm_page_t);
 void vm_page_unwire (vm_page_t, int);
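
For context, a hypothetical caller (not part of this change) needs no
source modification: PHYS_TO_VM_PAGE(pa) now merely expands to the
function call instead of an array index. The helper below is invented
for illustration:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <vm/vm.h>
	#include <vm/vm_page.h>

	/* Hypothetical helper: wire down the page backing a physical
	 * address. PHYS_TO_VM_PAGE(pa) expands to vm_page_from_phys(pa)
	 * after this change, so the lookup copes with sparse memory. */
	static void
	wire_phys_page(vm_offset_t pa)
	{
		vm_page_t m = PHYS_TO_VM_PAGE(pa);

		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
	}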
