Date:      Sun, 18 Mar 2018 16:40:57 +0000 (UTC)
From:      Mark Johnston <markj@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r331130 - in head/sys: kern vm
Message-ID:  <201803181640.w2IGevY0066821@repo.freebsd.org>

Author: markj
Date: Sun Mar 18 16:40:56 2018
New Revision: 331130
URL: https://svnweb.freebsd.org/changeset/base/331130

Log:
  Have vm_page_{deactivate,launder}() requeue already-queued pages.
  
  In many cases the page is not enqueued, so the change will have no
  effect. However, the change is needed to support an optimization in
  the fault handler, and in some cases (sendfile, the buffer cache) it
  was already being emulated by the caller (see the caller-side sketch
  after the log).
  
  Reviewed by:	alc
  Tested by:	pho
  MFC after:	2 weeks
  X-Differential Revision: https://reviews.freebsd.org/D14625
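
A caller-side sketch of the effect (illustrative only; it condenses the
sendfile and buffer-cache hunks below and omits the unchanged
vm_page_deactivate_noreuse() case):

    /* Before: callers had to distinguish already-inactive pages. */
    if (pg->queue == PQ_ACTIVE)
        vm_page_reference(pg);
    else if (pg->queue != PQ_INACTIVE)
        vm_page_deactivate(pg);     /* not queued, or queued elsewhere */
    else
        vm_page_requeue(pg);        /* already inactive: move to tail */

    /* After: vm_page_deactivate() performs the requeue itself. */
    if (pg->queue == PQ_ACTIVE)
        vm_page_reference(pg);
    else
        vm_page_deactivate(pg);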

Modified:
  head/sys/kern/kern_sendfile.c
  head/sys/kern/vfs_bio.c
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_page.c

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Sun Mar 18 16:36:14 2018	(r331129)
+++ head/sys/kern/kern_sendfile.c	Sun Mar 18 16:40:56 2018	(r331130)
@@ -167,10 +167,8 @@ sendfile_free_page(vm_page_t pg, bool nocache)
 					vm_page_deactivate_noreuse(pg);
 				else if (pg->queue == PQ_ACTIVE)
 					vm_page_reference(pg);
-				else if (pg->queue != PQ_INACTIVE)
-					vm_page_deactivate(pg);
 				else
-					vm_page_requeue(pg);
+					vm_page_deactivate(pg);
 			}
 		}
 	}

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c	Sun Mar 18 16:36:14 2018	(r331129)
+++ head/sys/kern/vfs_bio.c	Sun Mar 18 16:40:56 2018	(r331130)
@@ -2936,10 +2936,8 @@ vfs_vmio_unwire(struct buf *bp, vm_page_t m)
 				vm_page_deactivate_noreuse(m);
 			else if (m->queue == PQ_ACTIVE)
 				vm_page_reference(m);
-			else if (m->queue != PQ_INACTIVE)
-				vm_page_deactivate(m);
 			else
-				vm_page_requeue(m);
+				vm_page_deactivate(m);
 		}
 	}
 	vm_page_unlock(m);

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Sun Mar 18 16:36:14 2018	(r331129)
+++ head/sys/vm/vm_fault.c	Sun Mar 18 16:40:56 2018	(r331130)
@@ -1379,7 +1379,8 @@ vm_fault_dontneed(const struct faultstate *fs, vm_offs
 				 * active queue.
 				 */
 				vm_page_lock(m);
-				vm_page_deactivate(m);
+				if (!vm_page_inactive(m))
+					vm_page_deactivate(m);
 				vm_page_unlock(m);
 			}
 		}
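
The fault handler goes the other way: with vm_page_deactivate() now
requeuing already-inactive pages, vm_fault_dontneed() checks the queue
first so that pages it has previously deactivated are not moved to the
tail of the inactive queue (and the queue lock is not taken) on every
fault.  The guard uses vm_page_inactive(); assuming that helper is the
simple queue-membership test from vm_page.h, it amounts to:

    /* Sketch of the membership test assumed above (see vm_page.h). */
    static inline bool
    vm_page_inactive(vm_page_t m)
    {

        return (m->queue == PQ_INACTIVE);
    }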

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Sun Mar 18 16:36:14 2018	(r331129)
+++ head/sys/vm/vm_page.c	Sun Mar 18 16:40:56 2018	(r331130)
@@ -3363,7 +3363,8 @@ vm_page_unwire_noq(vm_page_t m)
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * Normally, "noreuse" is FALSE, resulting in LRU ordering of the inactive
  * queue.  However, setting "noreuse" to TRUE will accelerate the specified
@@ -3381,15 +3382,10 @@ _vm_page_deactivate(vm_page_t m, boolean_t noreuse)
 
 	vm_page_assert_locked(m);
 
-	/*
-	 * Ignore if the page is already inactive, unless it is unlikely to be
-	 * reactivated.
-	 */
-	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
-		return;
 	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
 		pq = &vm_pagequeue_domain(m)->vmd_pagequeues[PQ_INACTIVE];
 		/* Avoid multiple acquisitions of the inactive queue lock. */
+		queue = m->queue;
 		if (queue == PQ_INACTIVE) {
 			vm_pagequeue_lock(pq);
 			vm_page_dequeue_locked(m);
@@ -3411,7 +3407,8 @@ _vm_page_deactivate(vm_page_t m, boolean_t noreuse)
 }
 
 /*
- * Move the specified page to the inactive queue.
+ * Move the specified page to the inactive queue, or requeue the page if it is
+ * already in the inactive queue.
  *
  * The page must be locked.
  */
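
Putting the two _vm_page_deactivate() hunks above together: the early
return for already-inactive pages is gone, so the function now always
moves the page onto the inactive queue, requeuing it when it is already
there.  A paraphrase of the resulting body (a sketch only; the "else"
branch and the final insertion are assumed from the unchanged remainder
of the function and mirror the old vm_page_launder() code removed
below):

    if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
        pq = &vm_pagequeue_domain(m)->vmd_pagequeues[PQ_INACTIVE];
        /* Avoid multiple acquisitions of the inactive queue lock. */
        queue = m->queue;
        if (queue == PQ_INACTIVE) {
            /* Already inactive: dequeue under the same lock that
             * the reinsertion below will use (i.e., a requeue). */
            vm_pagequeue_lock(pq);
            vm_page_dequeue_locked(m);
        } else {
            /* On another queue, or not queued at all. */
            if (queue != PQ_NONE)
                vm_page_dequeue(m);
            vm_pagequeue_lock(pq);
        }
        /* ... reinsertion at the tail (or near the head when
         * "noreuse" is set), counter update, and unlock follow. */
    }
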
@@ -3438,19 +3435,20 @@ vm_page_deactivate_noreuse(vm_page_t m)
 /*
  * vm_page_launder
  *
- * 	Put a page in the laundry.
+ * 	Put a page in the laundry, or requeue it if it is already there.
  */
 void
 vm_page_launder(vm_page_t m)
 {
-	int queue;
 
 	vm_page_assert_locked(m);
-	if ((queue = m->queue) != PQ_LAUNDRY && m->wire_count == 0 &&
-	    (m->oflags & VPO_UNMANAGED) == 0) {
-		if (queue != PQ_NONE)
-			vm_page_dequeue(m);
-		vm_page_enqueue(PQ_LAUNDRY, m);
+	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
+		if (m->queue == PQ_LAUNDRY)
+			vm_page_requeue(m);
+		else {
+			vm_page_remque(m);
+			vm_page_enqueue(PQ_LAUNDRY, m);
+		}
 	}
 }
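
A "requeue" here is just a move to the tail of the page's current
queue, refreshing its position in that queue's LRU order.  A
self-contained userland illustration of the TAILQ mechanics (for
reference only; the kernel does this via vm_page_requeue() with the
page queue lock held):

    #include <sys/queue.h>
    #include <stdio.h>

    struct page {
        int id;
        TAILQ_ENTRY(page) plinks;
    };
    TAILQ_HEAD(pagequeue, page);

    /* Move "m" to the tail of "pq": the essence of a requeue. */
    static void
    requeue(struct pagequeue *pq, struct page *m)
    {

        TAILQ_REMOVE(pq, m, plinks);
        TAILQ_INSERT_TAIL(pq, m, plinks);
    }

    int
    main(void)
    {
        struct pagequeue pq = TAILQ_HEAD_INITIALIZER(pq);
        struct page pages[3] = {{ .id = 0 }, { .id = 1 }, { .id = 2 }};
        struct page *m;
        int i;

        for (i = 0; i < 3; i++)
            TAILQ_INSERT_TAIL(&pq, &pages[i], plinks);
        requeue(&pq, &pages[0]);
        TAILQ_FOREACH(m, &pq, plinks)   /* prints "1 2 0 " */
            printf("%d ", m->id);
        printf("\n");
        return (0);
    }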
 
@@ -3540,7 +3538,7 @@ vm_page_advise(vm_page_t m, int advice)
 	 */
 	if (m->dirty == 0)
 		vm_page_deactivate_noreuse(m);
-	else
+	else if (!vm_page_in_laundry(m))
 		vm_page_launder(m);
 }
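
vm_page_advise() previously relied on vm_page_launder() ignoring pages
that were already in the laundry; with the requeue behaviour added
above, the explicit vm_page_in_laundry() check preserves that "no
requeue" behaviour for dirty pages.  Assuming vm_page_in_laundry() is
the queue-membership test from vm_page.h (which also covers the
unswappable queue), it amounts to:

    /* Sketch of the test assumed above (see vm_page.h). */
    static inline bool
    vm_page_in_laundry(vm_page_t m)
    {

        return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
    }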
 


