Date:      Thu, 24 Jan 2008 04:04:13 GMT
From:      Kip Macy <kmacy@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 133969 for review
Message-ID:  <200801240404.m0O44DqB061272@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=133969

Change 133969 by kmacy@pandemonium:kmacy:xen31 on 2008/01/24 04:04:04

	don't use PTEs as a free list for pv chunks
	keep unused pv chunks in a free list; may need to revisit for low-memory handling
	tentatively re-enable pre-faulting
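
	For reviewers, here is a user-space sketch of the intrusive free
	list this change switches to: each free chunk's first word stores
	the link to the next free chunk, instead of threading the list
	through PTEs.  Illustrative only; the harness and names below are
	hypothetical, not the kernel code.

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define CHUNK_SIZE 4096			/* stand-in for PAGE_SIZE */

	/*
	 * LIFO free list threaded through the free chunks themselves,
	 * mirroring pmap_ptelist_free()/pmap_ptelist_alloc() minus the
	 * locking and the vm_page backing logic.
	 */
	static void
	chunk_free(uintptr_t *head, uintptr_t va)
	{
		*(uintptr_t *)va = *head;	/* store old head in the chunk */
		*head = va;
	}

	static uintptr_t
	chunk_alloc(uintptr_t *head)
	{
		uintptr_t va;

		if (*head == 0)
			return (0);	/* empty; caller must carve fresh KVA */
		va = *head;
		*head = *(uintptr_t *)va;	/* pop by following the link */
		return (va);
	}

	int
	main(void)
	{
		uintptr_t head = 0;
		void *a = malloc(CHUNK_SIZE), *b = malloc(CHUNK_SIZE);

		chunk_free(&head, (uintptr_t)a);
		chunk_free(&head, (uintptr_t)b);
		assert(chunk_alloc(&head) == (uintptr_t)b);	/* LIFO */
		assert(chunk_alloc(&head) == (uintptr_t)a);
		assert(chunk_alloc(&head) == 0);
		free(a);
		free(b);
		return (0);
	}

	The upside over the PTE scheme is that a freed chunk keeps its wired
	page mapped, so reallocation is a constant-time pop with no page
	allocation; the cost is that unused chunks pin memory, hence the
	low-memory caveat above.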

Affected files ...

.. //depot/projects/xen31/sys/i386/xen/pmap.c#32 edit

Differences ...

==== //depot/projects/xen31/sys/i386/xen/pmap.c#32 (text+ko) ====

@@ -225,6 +225,7 @@
 static int shpgperproc = PMAP_SHPGPERPROC;
 
 struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
+struct pv_chunk *pv_next_unused;	/* next never-used KVA for pv chunks */
 int pv_maxchunks;			/* How many chunks we have KVA for */
-vm_offset_t pv_vafree;			/* freelist stored in the PTE */
+vm_offset_t pv_vafree;			/* freelist of recycled pv chunk KVA */
 
@@ -293,6 +294,7 @@
 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
 static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
+static void pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq);
 
 #if defined(PAE) && !defined(XEN)
 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
@@ -308,8 +310,41 @@
  */
 CTASSERT(KERNBASE % (1 << 24) == 0);
 
+SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
+	"Max number of PV entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
+	"Page share factor per proc");
+
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+	"Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+	"Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+	"Current number of pv entry chunk frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+	"Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+	"Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+	"Current number of pv entry allocs");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+	"Current number of spare pv entries");
 
+static int pmap_collect_inactive, pmap_collect_active;
 
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
+	"Current number of times pmap_collect called on inactive queue");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
+	"Current number of times pmap_collect called on active queue");
+#endif
 
 static __inline void
 pagezero(void *page)
@@ -613,45 +648,73 @@
  * Because PG_V is never set, there can be no mappings to invalidate.
  */
 static vm_offset_t
-pmap_ptelist_alloc(vm_offset_t *head)
+pmap_ptelist_alloc(pmap_t pmap, int try, vm_offset_t *head)
 {
-	pt_entry_t *pte;
 	vm_offset_t va;
+	vm_page_t m;
+	static struct vpgqueues *pq;	/* persists so reclaim escalates across retries */
+	static vm_pindex_t colour;
+	
+	if (*head != 0) {
+		va = *head;
+		*head = *(vm_offset_t *)va;	/* pop: follow the stored link */
+		return (va);
+	}
+
+	/*
+	 * Nothing to recycle on "pv_vafree"; carve the next never-used
+	 * chunk of KVA and back it with a freshly allocated, wired page.
+	 */
+	if ((vm_offset_t)pv_next_unused <
+	    (vm_offset_t)pv_chunkbase + pv_maxchunks * PAGE_SIZE)
+		va = (vm_offset_t)pv_next_unused;
+	else
+		va = 0;
+
+	if (va == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
+	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+		if (try) {
+			pv_entry_count--;
+			PV_STAT(pc_chunk_tryfail++);
+			return (0);
+		}
+		/*
+		 * Reclaim pv entries: at first, destroy mappings to
+		 * inactive pages.  After that, if a pv chunk is still
+		 * needed, destroy mappings to active pages.
+		 */
+		if (pq == NULL) {
+			PV_STAT(pmap_collect_inactive++);
+			pq = &vm_page_queues[PQ_INACTIVE];
+		} else if (pq == &vm_page_queues[PQ_INACTIVE]) {
+			PV_STAT(pmap_collect_active++);
+			pq = &vm_page_queues[PQ_ACTIVE];
+		} else
+			panic("get_pv_entry: increase vm.pmap.shpgperproc");
+		pmap_collect(pmap, pq);
+		return (1);	/* sentinel: tell the caller to retry */
+	}
+	pv_next_unused++;
+	pq = NULL;		/* reset the reclaim escalation */
+	colour++;
+	pmap_qenter(va, &m, 1);
+	if ((m->flags & PG_ZERO) == 0)
+		pagezero((void *)va);
 
-	va = *head;
-	if (va == 0)
-		return (va);	/* Out of memory */
-	pte = vtopte(va);
-	*head = *pte;
-	if (*head & PG_V)
-		panic("pmap_ptelist_alloc: va with PG_V set!");
-	*pte = 0;
 	return (va);
 }
 
 static void
 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
 {
-	pt_entry_t *pte;
-
-	if (va & PG_V)
-		panic("pmap_ptelist_free: freeing va with PG_V set!");
-	pte = vtopte(va);
-	*pte = *head;		/* virtual! PG_V is 0 though */
+	*(vm_offset_t *)va = *head;	/* store old head in the freed chunk */
 	*head = va;
 }
 
 static void
 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
 {
-	int i;
-	vm_offset_t va;
-
 	*head = 0;
-	for (i = npages - 1; i >= 0; i--) {
-		va = (vm_offset_t)base + i * PAGE_SIZE;
-		pmap_ptelist_free(head, va);
-	}
 }
 
 
@@ -676,7 +739,7 @@
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 
 	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
-	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
+	pv_next_unused = pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
 	    PAGE_SIZE * pv_maxchunks);
 	if (pv_chunkbase == NULL)
 		panic("pmap_init: not enough kvm for pv chunks");
@@ -690,12 +753,6 @@
 }
 
 
-SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
-	"Max number of PV entries");
-SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
-	"Page share factor per proc");
-
 /***************************************************
  * Low level helper routines.....
  ***************************************************/
@@ -1882,36 +1939,6 @@
 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 	"Current number of pv entries");
 
-#ifdef PV_STATS
-static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
-
-SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
-	"Current number of pv entry chunks");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
-	"Current number of pv entry chunks allocated");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
-	"Current number of pv entry chunks frees");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
-	"Number of times tried to get a chunk page but failed.");
-
-static long pv_entry_frees, pv_entry_allocs;
-static int pv_entry_spare;
-
-SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
-	"Current number of pv entry frees");
-SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
-	"Current number of pv entry allocs");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
-	"Current number of spare pv entries");
-
-static int pmap_collect_inactive, pmap_collect_active;
-
-SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
-	"Current number times pmap_collect called on inactive queue");
-SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
-	"Current number times pmap_collect called on active queue");
-#endif
-
 /*
  * We are in a serious low memory condition.  Resort to
  * drastic measures to free some pages so we can allocate
@@ -1974,7 +2001,6 @@
 static void
 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 {
-	vm_page_t m;
 	struct pv_chunk *pc;
 	int idx, field, bit;
 
@@ -1999,10 +2025,6 @@
 	PV_STAT(pc_chunk_frees++);
 	/* entire chunk is free, return it */
 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
-	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
-	pmap_qremove((vm_offset_t)pc, 1);
-	vm_page_unwire(m, 0);
-	vm_page_free(m);
 	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 }
 
@@ -2015,12 +2037,10 @@
 {
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
-	static vm_pindex_t colour;
 	struct vpgqueues *pq;
 	int bit, field;
 	pv_entry_t pv;
 	struct pv_chunk *pc;
-	vm_page_t m;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2056,42 +2076,13 @@
 			return (pv);
 		}
 	}
-	/*
-	 * Access to the ptelist "pv_vafree" is synchronized by the page
-	 * queues lock.  If "pv_vafree" is currently non-empty, it will
-	 * remain non-empty until pmap_ptelist_alloc() completes.
-	 */
-	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
-	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
-	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
-		if (try) {
-			pv_entry_count--;
-			PV_STAT(pc_chunk_tryfail++);
-			return (NULL);
-		}
-		/*
-		 * Reclaim pv entries: At first, destroy mappings to
-		 * inactive pages.  After that, if a pv chunk entry
-		 * is still needed, destroy mappings to active pages.
-		 */
-		if (pq == NULL) {
-			PV_STAT(pmap_collect_inactive++);
-			pq = &vm_page_queues[PQ_INACTIVE];
-		} else if (pq == &vm_page_queues[PQ_INACTIVE]) {
-			PV_STAT(pmap_collect_active++);
-			pq = &vm_page_queues[PQ_ACTIVE];
-		} else
-			panic("get_pv_entry: increase vm.pmap.shpgperproc");
-		pmap_collect(pmap, pq);
+	pc = (struct pv_chunk *)pmap_ptelist_alloc(pmap, try, &pv_vafree);
+	if ((vm_offset_t)pc == 1)	/* sentinel: pmap_collect() was run */
 		goto retry;
-	}
+	if (pc == NULL)
+		return (NULL);
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
-	colour++;
-	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
-	pmap_qenter((vm_offset_t)pc, &m, 1);
-	if ((m->flags & PG_ZERO) == 0)
-		pagezero(pc);
 	pc->pc_pmap = pmap;
 	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
 	for (field = 1; field < _NPCM; field++)
@@ -3439,10 +3430,6 @@
 			PV_STAT(pc_chunk_count--);
 			PV_STAT(pc_chunk_frees++);
 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
-			m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
-			pmap_qremove((vm_offset_t)pc, 1);
-			vm_page_unwire(m, 0);
-			vm_page_free(m);
 			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 		}
 	}
@@ -3502,8 +3489,6 @@
 	pt_entry_t *pte;
 	boolean_t rv = FALSE;
 
-	return (rv);
-	
 	if (*pmap_pde(pmap, addr)) {
 		pte = vtopte(addr);
 		rv = ((*pte & PG_V) == 0);
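
For context, the reclaim path that this change moves into
pmap_ptelist_alloc() escalates through the page queues before giving up:
a normal allocation is tried first, then mappings to inactive pages are
destroyed, then mappings to active pages, and finally the kernel panics
asking for a larger vm.pmap.shpgperproc.  A minimal user-space sketch of
that escalation pattern follows; try_page_alloc() and collect_queue()
are hypothetical stand-ins for vm_page_alloc() and pmap_collect(), not
kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum reclaim_stage { STAGE_NONE, STAGE_INACTIVE, STAGE_ACTIVE };

static int attempts;

/* Pretend the first two page allocations fail under memory pressure. */
static bool
try_page_alloc(void)
{
	return (++attempts > 2);
}

static void
collect_queue(enum reclaim_stage stage)
{
	printf("reclaiming from the %s queue\n",
	    stage == STAGE_INACTIVE ? "inactive" : "active");
}

static bool
alloc_with_reclaim(void)
{
	enum reclaim_stage stage = STAGE_NONE;

	for (;;) {
		if (try_page_alloc())
			return (true);
		/* Escalate: inactive queue first, then active, then fail. */
		if (stage == STAGE_NONE)
			stage = STAGE_INACTIVE;
		else if (stage == STAGE_INACTIVE)
			stage = STAGE_ACTIVE;
		else
			abort();	/* "increase vm.pmap.shpgperproc" */
		collect_queue(stage);
	}
}

int
main(void)
{
	return (alloc_with_reclaim() ? 0 : 1);
}

In the patch itself the retry loop lives in the caller:
pmap_ptelist_alloc() returns the sentinel value 1 after each
pmap_collect() pass, and get_pv_entry() jumps back to its retry label.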


