Date:      Sun, 2 Oct 2016 21:13:46 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-user@freebsd.org
Subject:   svn commit: r306595 - in user/alc/PQ_LAUNDRY/sys: sys vm
Message-ID:  <201610022113.u92LDkDY060472@repo.freebsd.org>

Author: markj
Date: Sun Oct  2 21:13:45 2016
New Revision: 306595
URL: https://svnweb.freebsd.org/changeset/base/306595

Log:
  Make the shortfall target smaller and wake the laundry thread on demand.
  
  Rather than waking up periodically, the laundry thread is now woken by
  the pagedaemon after an inactive queue scan. If the scan failed to
  reach its target, the laundry thread attempts to make up the shortfall
  before the next inactive queue scan. This helps avoid excessive
  laundering in scenarios where the inactive queue is not a significant
  source of reclaimable memory (e.g., when most of a system's memory
  belongs to the ZFS ARC), by giving lowmem handlers a chance to make up
  for a page shortage before a laundering target is set.
  
  Reported by:	pho
  Reviewed by:	alc

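In outline, the hand-off described in the log can be modeled in userland
as in the sketch below. pthreads stand in for the kernel's
mtx_sleep()/wakeup(), and every name and constant here is illustrative
rather than committed code:

#include <pthread.h>
#include <stdio.h>

enum laundry_req { LAUNDRY_IDLE, LAUNDRY_BACKGROUND, LAUNDRY_SHORTFALL };

static pthread_mutex_t laundry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t laundry_cv = PTHREAD_COND_INITIALIZER;
static enum laundry_req laundry_request = LAUNDRY_IDLE;

/* Pagedaemon side: post a request after an inactive queue scan. */
static void
pagedaemon_kick(int page_shortage)
{

	pthread_mutex_lock(&laundry_lock);
	if (laundry_request == LAUNDRY_IDLE) {
		laundry_request = page_shortage > 0 ?
		    LAUNDRY_SHORTFALL : LAUNDRY_BACKGROUND;
		pthread_cond_signal(&laundry_cv);
	}
	pthread_mutex_unlock(&laundry_lock);
}

/* Laundry side: block until a request arrives, then consume it. */
static enum laundry_req
laundry_wait(void)
{
	enum laundry_req req;

	pthread_mutex_lock(&laundry_lock);
	while (laundry_request == LAUNDRY_IDLE)
		pthread_cond_wait(&laundry_cv, &laundry_lock);
	req = laundry_request;
	laundry_request = LAUNDRY_IDLE;	/* the kernel resets only when done */
	pthread_mutex_unlock(&laundry_lock);
	return (req);
}

static void *
laundry_thread(void *arg)
{
	enum laundry_req req;

	(void)arg;
	req = laundry_wait();
	printf("laundering %s\n", req == LAUNDRY_SHORTFALL ?
	    "aggressively (shortfall)" : "in the background");
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, laundry_thread, NULL);
	pagedaemon_kick(128);	/* pretend the scan fell 128 pages short */
	pthread_join(tid, NULL);
	return (0);
}

The committed code differs in that the worker polls with a timeout while
a laundering run is still in progress and resets the request word only
once its target is met; see the vm_pageout.c hunks below.
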
Modified:
  user/alc/PQ_LAUNDRY/sys/sys/vmmeter.h
  user/alc/PQ_LAUNDRY/sys/vm/vm_page.h
  user/alc/PQ_LAUNDRY/sys/vm/vm_pageout.c

Modified: user/alc/PQ_LAUNDRY/sys/sys/vmmeter.h
==============================================================================
--- user/alc/PQ_LAUNDRY/sys/sys/vmmeter.h	Sun Oct  2 21:11:25 2016	(r306594)
+++ user/alc/PQ_LAUNDRY/sys/sys/vmmeter.h	Sun Oct  2 21:13:45 2016	(r306595)
@@ -77,6 +77,7 @@ struct vmmeter {
 	u_int v_intrans;	/* (p) intransit blocking page faults */
 	u_int v_reactivated;	/* (p) pages reactivated by the pagedaemon */
 	u_int v_pdwakeups;	/* (p) times daemon has awaken from sleep */
+	u_int v_ltwakeups;	/* (p) times laundry thread has been woken */
 	u_int v_pdpages;	/* (p) pages analyzed by daemon */
 
 	u_int v_tcached;	/* (p) total pages cached */
@@ -112,7 +113,6 @@ struct vmmeter {
 	u_int v_vforkpages;	/* (p) VM pages affected by vfork() */
 	u_int v_rforkpages;	/* (p) VM pages affected by rfork() */
 	u_int v_kthreadpages;	/* (p) VM pages affected by fork() by kernel */
-	u_int v_spare[1];
 };
 #ifdef _KERNEL
 
@@ -193,8 +193,7 @@ static inline int
 vm_laundry_target(void)
 {
 
-	return (vm_cnt.v_inactive_target - vm_cnt.v_inactive_count +
-	    vm_paging_target());
+	return (vm_paging_target());
 }
 
 /*

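The vm_laundry_target() change drops the inactive queue deficit from the
shortfall computation, so the laundry thread now targets only the free
page deficit. Assuming vm_paging_target() was defined as v_free_target -
v_free_count in the vmmeter.h of this era, the toy calculation below,
with entirely made-up page counts, shows how much smaller the target
becomes:

#include <stdio.h>

/*
 * Toy restatement of the target change.  All page counts here are
 * invented; only the arithmetic mirrors the diff above.
 */
int
main(void)
{
	int free_target = 8192, free_count = 7000;	/* hypothetical */
	int inactive_target = 32768, inactive_count = 4000;
	int old_target, new_target;

	/* Pre-commit: inactive queue deficit plus paging target. */
	old_target = (inactive_target - inactive_count) +
	    (free_target - free_count);
	/* Post-commit: paging target alone. */
	new_target = free_target - free_count;

	printf("old laundry target: %d pages\n", old_target);	/* 29960 */
	printf("new laundry target: %d pages\n", new_target);	/* 1192 */
	return (0);
}
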
Modified: user/alc/PQ_LAUNDRY/sys/vm/vm_page.h
==============================================================================
--- user/alc/PQ_LAUNDRY/sys/vm/vm_page.h	Sun Oct  2 21:11:25 2016	(r306594)
+++ user/alc/PQ_LAUNDRY/sys/vm/vm_page.h	Sun Oct  2 21:13:45 2016	(r306595)
@@ -239,6 +239,7 @@ extern struct vm_domain vm_dom[MAXMEMDOM
 
 #define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
 #define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
+#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
 #define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
 
 #ifdef _KERNEL

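The new vm_pagequeue_lockptr() accessor exists so the queue mutex can be
handed to mtx_sleep(), which must drop the lock atomically with going to
sleep and reacquire it on wakeup. The fragment below mirrors the pattern
used in vm_pageout.c later in this diff; it is an illustration, not a
standalone compilation unit:

	vm_pagequeue_lock(pq);
	while (vm_laundry_request == VM_LAUNDRY_IDLE)
		/* A timeout of 0 means "sleep until awoken". */
		(void)mtx_sleep(&vm_laundry_request,
		    vm_pagequeue_lockptr(pq), PVM, "laundr", 0);
	vm_pagequeue_unlock(pq);
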
Modified: user/alc/PQ_LAUNDRY/sys/vm/vm_pageout.c
==============================================================================
--- user/alc/PQ_LAUNDRY/sys/vm/vm_pageout.c	Sun Oct  2 21:11:25 2016	(r306594)
+++ user/alc/PQ_LAUNDRY/sys/vm/vm_pageout.c	Sun Oct  2 21:13:45 2016	(r306595)
@@ -166,6 +166,12 @@ static int vm_pageout_oom_seq = 12;
 bool vm_pageout_wanted;		/* Event on which pageout daemon sleeps */
 bool vm_pages_needed;		/* Are threads waiting for free pages? */
 
+static enum {
+	VM_LAUNDRY_IDLE,
+	VM_LAUNDRY_BACKGROUND,
+	VM_LAUNDRY_SHORTFALL,
+} vm_laundry_request;		/* Pending request for dirty page laundering. */
+
 #if !defined(NO_SWAPPING)
 static int vm_pageout_req_swapout;	/* XXX */
 static int vm_daemon_needed;
@@ -1105,19 +1111,21 @@ static void
 vm_pageout_laundry_worker(void *arg)
 {
 	struct vm_domain *domain;
+	struct vm_pagequeue *pq;
 	uint64_t nclean, ndirty;
 	u_int last_launder, wakeups;
 	int cycle, domidx, last_target, launder, prev_shortfall, shortfall;
-	int target;
+	int sleeptime, target;
 
 	domidx = (uintptr_t)arg;
 	domain = &vm_dom[domidx];
+	pq = &domain->vmd_pagequeues[PQ_LAUNDRY];
 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
 	vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY);
 
 	cycle = 0;
 	last_launder = 0;
-	prev_shortfall = 0;
+	shortfall = prev_shortfall = 0;
 	target = 0;
 
 	/*
@@ -1133,18 +1141,9 @@ vm_pageout_laundry_worker(void *arg)
 		 * First determine whether we need to launder pages to meet a
 		 * shortage of free pages.
 		 */
-		shortfall = vm_laundry_target() + vm_pageout_deficit;
 		if (shortfall > 0) {
-			/*
-			 * If we're in shortfall and we haven't yet started a
-			 * laundering cycle to get us out of it, begin a run.
-			 * If we're still in shortfall despite a previous
-			 * laundering run, start a new one.
-			 */
-			if (prev_shortfall == 0 || cycle == 0) {
-				target = shortfall;
-				cycle = VM_LAUNDER_RATE;
-			}
+			target = shortfall;
+			cycle = VM_LAUNDER_RATE;
 			prev_shortfall = shortfall;
 		}
 		if (prev_shortfall > 0) {
@@ -1155,7 +1154,7 @@ vm_pageout_laundry_worker(void *arg)
 			 * shortfall, we have no immediate need to launder
 			 * pages.  Otherwise keep laundering.
 			 */
-			if (shortfall <= 0 || cycle == 0) {
+			if (vm_laundry_target() <= 0 || cycle == 0) {
 				prev_shortfall = target = 0;
 			} else {
 				last_launder = wakeups;
@@ -1211,17 +1210,34 @@ vm_pageout_laundry_worker(void *arg)
 		}
 
 dolaundry:
-		if (launder > 0) {
+		if (launder > 0)
 			/*
 			 * Because of I/O clustering, the number of laundered
 			 * pages could exceed "target" by the maximum size of
 			 * a cluster minus one. 
 			 */
 			target -= min(vm_pageout_launder(domain, launder,
-			    prev_shortfall > 0), target);
-		}
-		tsleep(&vm_cnt.v_laundry_count, PVM, "laundr",
-		    hz / VM_LAUNDER_INTERVAL);
+			    shortfall > 0), target);
+
+		/*
+		 * Sleep for a little bit if we're in the middle of a laundering
+		 * run or a pagedaemon thread has signalled us since the last run
+		 * started.  Otherwise, wait for a kick from the pagedaemon.
+		 */
+		vm_pagequeue_lock(pq);
+		if (target > 0 || vm_laundry_request != VM_LAUNDRY_IDLE)
+			sleeptime = hz / VM_LAUNDER_INTERVAL;
+		else
+			sleeptime = 0;
+		(void)mtx_sleep(&vm_laundry_request, vm_pagequeue_lockptr(pq),
+		    PVM, "laundr", sleeptime);
+		if (vm_laundry_request == VM_LAUNDRY_SHORTFALL)
+			shortfall = vm_laundry_target() + vm_pageout_deficit;
+		else
+			shortfall = 0;
+		if (target == 0)
+			vm_laundry_request = VM_LAUNDRY_IDLE;
+		vm_pagequeue_unlock(pq);
 	}
 }
 
@@ -1235,7 +1251,7 @@ static void
 vm_pageout_scan(struct vm_domain *vmd, int pass)
 {
 	vm_page_t m, next;
-	struct vm_pagequeue *pq;
+	struct vm_pagequeue *pq, *laundryq;
 	vm_object_t object;
 	long min_scan;
 	int act_delta, addl_page_shortage, deficit, maxscan;
@@ -1456,11 +1472,22 @@ drop_page:
 	vm_pagequeue_unlock(pq);
 
 	/*
-	 * Wakeup the laundry thread(s) if we didn't free the targeted number
-	 * of pages.
-	 */
-	if (page_shortage > 0)
-		wakeup(&vm_cnt.v_laundry_count);
+	 * Wake up the laundry thread so that it can perform any needed
+	 * laundering.  If we didn't meet our target, we're in shortfall and
+	 * need to launder more aggressively.
+	 */
+	if (vm_laundry_request == VM_LAUNDRY_IDLE &&
+	    starting_page_shortage > 0) {
+		laundryq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
+		vm_pagequeue_lock(laundryq);
+		if (page_shortage > 0)
+			vm_laundry_request = VM_LAUNDRY_SHORTFALL;
+		else if (vm_laundry_request != VM_LAUNDRY_SHORTFALL)
+			vm_laundry_request = VM_LAUNDRY_BACKGROUND;
+		wakeup(&vm_laundry_request);
+		vm_pagequeue_unlock(laundryq);
+		PCPU_INC(cnt.v_ltwakeups);
+	}
 
 #if !defined(NO_SWAPPING)
 	/*

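The worker's new sleep policy condenses to a single decision: poll on a
short timeout while a laundering run is still in progress or a request
arrived since the run started, and otherwise block until the pagedaemon
issues a wakeup. A minimal restatement, with hypothetical values for hz
and VM_LAUNDER_INTERVAL:

/*
 * Condensed restatement of the sleep decision from the hunk above.  In
 * the kernel, returning 0 as the mtx_sleep() timeout means "sleep until
 * awoken", which is what makes the idle laundry thread purely
 * demand-driven.
 */
#define	HZ			1000	/* hypothetical ticks per second */
#define	VM_LAUNDER_INTERVAL	10	/* polls per second during a run */

enum laundry_req { LAUNDRY_IDLE, LAUNDRY_BACKGROUND, LAUNDRY_SHORTFALL };

static int
laundry_sleeptime(int target, enum laundry_req request)
{

	if (target > 0 || request != LAUNDRY_IDLE)
		return (HZ / VM_LAUNDER_INTERVAL);
	return (0);
}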