Date:      Mon, 5 Feb 2018 22:21:51 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r328903 - user/jeff/numa/sys/vm
Message-ID:  <201802052221.w15MLphJ012904@repo.freebsd.org>

Author: jeff
Date: Mon Feb  5 22:21:51 2018
New Revision: 328903
URL: https://svnweb.freebsd.org/changeset/base/328903

Log:
  Improve vm_page_import by allocating contig pages as long as they are
  available, falling back to smaller runs when memory is fragmented.
  
  Honor vm_domain_available() limits on page allocation.
  
  Implement an optimistic phys allocator that returns the largest
  available contig region no larger than the request.
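
The optimistic allocator is the interesting part: rather than failing a
multi-page request outright, it starts at the largest power-of-two order
that fits the request and walks down the orders until something is free.
Below is a minimal standalone sketch of that order descent; free_count[]
and alloc_npages() here are hypothetical stand-ins for the buddy free
lists and the new vm_phys_alloc_npages(), not the kernel code itself.

	#include <stdio.h>
	#include <strings.h>		/* fls(3), available on FreeBSD */

	#define	NFREEORDER	13	/* mirrors VM_NFREEORDER */

	static int free_count[NFREEORDER];	/* free runs at each order */

	/*
	 * Return the number of pages granted, always a power of two no
	 * larger than cnt, or 0 if nothing is free at any order.
	 */
	static int
	alloc_npages(int cnt)
	{
		int order;

		order = fls(cnt) - 1;		/* largest order that fits cnt */
		if (order >= NFREEORDER)
			order = NFREEORDER - 1;
		for (; order >= 0; order--) {
			if (free_count[order] > 0) {
				free_count[order]--;
				return (1 << order);
			}
		}
		return (0);
	}

	int
	main(void)
	{
		free_count[3] = 1;	/* only a single 8-page run is free */

		/* A request for 64 pages falls back to the 8-page run. */
		printf("granted %d pages\n", alloc_npages(64));
		return (0);
	}

Returning a smaller run instead of failing lets a caller such as
vm_page_import shrink its stride and keep filling its store rather than
bailing on the first fragmented request.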

Modified:
  user/jeff/numa/sys/vm/vm_page.c
  user/jeff/numa/sys/vm/vm_phys.c
  user/jeff/numa/sys/vm/vm_phys.h

Modified: user/jeff/numa/sys/vm/vm_page.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_page.c	Mon Feb  5 21:29:27 2018	(r328902)
+++ user/jeff/numa/sys/vm/vm_page.c	Mon Feb  5 22:21:51 2018	(r328903)
@@ -217,6 +217,12 @@ vm_page_init_cache_zones(void *dummy __unused)
 
 	for (i = 0; i < vm_ndomains; i++) {
 		vmd = VM_DOMAIN(i);
+		/*
+		 * Don't allow the page cache to take up more than .25% of
+		 * memory.
+		 */
+		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
+			continue;
 		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
 		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
 		    vm_page_import, vm_page_release, vmd,
@@ -2182,28 +2188,25 @@ vm_page_import(void *arg, void **store, int cnt, int d
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
-	int i;
+	int i, j, n;
 
 	vmd = arg;
 	domain = vmd->vmd_domain;
-	cnt = rounddown2(cnt, 8);
+	n = 64;	/* Starting stride. */
 	vm_domain_free_lock(vmd);
-	for (i = 0; i < cnt; i+=8) {
-		m = vm_phys_alloc_pages(domain, VM_FREELIST_DEFAULT, 3);
-		if (m == NULL)
+	for (i = 0; i < cnt; i+=n) {
+		if (!vm_domain_available(vmd, VM_ALLOC_NORMAL, n))
 			break;
-		store[i] = m;
+		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
+		    MIN(n, cnt-i));
+		if (n == 0)
+			break;
+		for (j = 0; j < n; j++)
+			store[i+j] = m++;
 	}
 	if (i != 0)
 		vm_domain_freecnt_adj(vmd, -i);
 	vm_domain_free_unlock(vmd);
-	cnt = i;
-	for (i = 0; i < cnt; i++) {
-		if ((i % 8) == 0)
-			m = store[i];
-		else
-			store[i] = ++m;
-	}
 
 	return (i);
 }
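
For scale, the new gate in vm_page_init_cache_zones() above works out as
follows, assuming 4 KB pages: a 16 GB domain has vmd_page_count =
4,194,304, so vmd_page_count / 400 is roughly 10,485 pages, which clears
the 256 * mp_ncpus = 8,192 threshold on a 32-CPU machine and the cache
zone is created.  A domain under about 12.5 GB on the same machine skips
the zone, since the 256 pages per CPU that the check allows for would
then exceed 0.25% of the domain's memory.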

Modified: user/jeff/numa/sys/vm/vm_phys.c
==============================================================================
--- user/jeff/numa/sys/vm/vm_phys.c	Mon Feb  5 21:29:27 2018	(r328902)
+++ user/jeff/numa/sys/vm/vm_phys.c	Mon Feb  5 22:21:51 2018	(r328903)
@@ -73,14 +73,14 @@ _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
     "Too many physsegs.");
 
 #ifdef NUMA
-struct mem_affinity *mem_affinity;
-int *mem_locality;
+struct mem_affinity __read_mostly *mem_affinity;
+int __read_mostly *mem_locality;
 #endif
 
-int vm_ndomains = 1;
+int __read_mostly vm_ndomains = 1;
 
-struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
-int vm_phys_nsegs;
+struct vm_phys_seg __read_mostly vm_phys_segs[VM_PHYSSEG_MAX];
+int __read_mostly vm_phys_nsegs;
 
 struct vm_phys_fictitious_seg;
 static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
@@ -100,18 +100,18 @@ struct vm_phys_fictitious_seg {
 RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
     vm_phys_fictitious_cmp);
 
-static struct rwlock vm_phys_fictitious_reg_lock;
+static struct rwlock_padalign vm_phys_fictitious_reg_lock;
 MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
 
-static struct vm_freelist
+static struct vm_freelist __aligned(CACHE_LINE_SIZE)
     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
 
-static int vm_nfreelists;
+static int __read_mostly vm_nfreelists;
 
 /*
  * Provides the mapping from VM_FREELIST_* to free list indices (flind).
  */
-static int vm_freelist_to_flind[VM_NFREELIST];
+static int __read_mostly vm_freelist_to_flind[VM_NFREELIST];
 
 CTASSERT(VM_FREELIST_DEFAULT == 0);
 
@@ -622,6 +622,26 @@ vm_phys_alloc_pages(int domain, int pool, int order)
 			return (m);
 	}
 	return (NULL);
+}
+
+int
+vm_phys_alloc_npages(int domain, int pool, vm_page_t *mp, int cnt)
+{
+	vm_page_t m;
+	int order, freelist;
+
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
+		for (order = fls(cnt) -1; order >= 0; order--) {
+			m = vm_phys_alloc_freelist_pages(domain, freelist,
+			    pool, order);
+			if (m != NULL) {
+				*mp = m;
+				return (1 << order);
+			}
+		}
+	}
+	*mp = NULL;
+	return (0);
 }
 
 /*

Modified: user/jeff/numa/sys/vm/vm_phys.h
==============================================================================
--- user/jeff/numa/sys/vm/vm_phys.h	Mon Feb  5 21:29:27 2018	(r328902)
+++ user/jeff/numa/sys/vm/vm_phys.h	Mon Feb  5 22:21:51 2018	(r328903)
@@ -79,6 +79,7 @@ vm_page_t vm_phys_alloc_contig(int domain, u_long npag
 vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
     int order);
 vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
+int vm_phys_alloc_npages(int domain, int pool, vm_page_t *m, int cnt);
 int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
     vm_memattr_t memattr);
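
One contract worth noting in the new interface: vm_phys_alloc_npages()
grants at most the largest power of two that fits the request (order
fls(cnt) - 1), so a caller asking for 48 pages gets at most 32 from a
single call.  Callers are expected to loop, as vm_page_import() does
above, until cnt pages are gathered or the domain runs dry.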


