Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 19 Feb 2020 09:10:11 +0000 (UTC)
From:      Jeff Roberson <jeff@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r358098 - head/sys/vm
Message-ID:  <202002190910.01J9ABx9092808@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: jeff
Date: Wed Feb 19 09:10:11 2020
New Revision: 358098
URL: https://svnweb.freebsd.org/changeset/base/358098

Log:
  Don't release xbusy on kmem pages.  After lockless page lookup we will not
  be able to guarantee that they can be reacquired without blocking.
  
  Reviewed by:	kib
  Discussed with:	markj
  Differential Revision:	https://reviews.freebsd.org/D23506

Modified:
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_kern.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_swapout.c

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c	Wed Feb 19 08:17:27 2020	(r358097)
+++ head/sys/vm/vm_glue.c	Wed Feb 19 09:10:11 2020	(r358098)
@@ -342,10 +342,8 @@ vm_thread_stack_create(struct domainset *ds, vm_object
 	VM_OBJECT_WLOCK(ksobj);
 	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
 	    ma, pages);
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < pages; i++)
 		vm_page_valid(ma[i]);
-		vm_page_xunbusy(ma[i]);
-	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;
@@ -365,7 +363,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("%s: kstack already missing?", __func__);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c	Wed Feb 19 08:17:27 2020	(r358097)
+++ head/sys/vm/vm_kern.c	Wed Feb 19 09:10:11 2020	(r358098)
@@ -224,7 +224,6 @@ retry:
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -317,7 +316,6 @@ retry:
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -501,7 +499,6 @@ retry:
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
 		vm_page_valid(m);
-		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0
@@ -591,7 +588,7 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_
 #endif
 	for (; offset < end; offset += PAGE_SIZE, m = next) {
 		next = vm_page_next(m);
-		vm_page_busy_acquire(m, 0);
+		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h	Wed Feb 19 08:17:27 2020	(r358097)
+++ head/sys/vm/vm_page.h	Wed Feb 19 09:10:11 2020	(r358098)
@@ -764,9 +764,14 @@ void vm_page_object_busy_assert(vm_page_t m);
 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
 	vm_page_assert_pga_writeable(m, bits)
+#define	vm_page_xbusy_claim(m) do {					\
+	vm_page_assert_xbusied_unchecked((m));				\
+	(m)->busy_lock = VPB_CURTHREAD_EXCLUSIVE;			\
+} while (0)
 #else
 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
+#define	vm_page_xbusy_claim(m)
 #endif
 
 #if BYTE_ORDER == BIG_ENDIAN

Modified: head/sys/vm/vm_swapout.c
==============================================================================
--- head/sys/vm/vm_swapout.c	Wed Feb 19 08:17:27 2020	(r358097)
+++ head/sys/vm/vm_swapout.c	Wed Feb 19 09:10:11 2020	(r358098)
@@ -540,6 +540,7 @@ vm_thread_swapout(struct thread *td)
 		if (m == NULL)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
+		vm_page_xunbusy_unchecked(m);
 		vm_page_unwire(m, PQ_LAUNDRY);
 	}
 	VM_OBJECT_WUNLOCK(ksobj);
@@ -564,7 +565,6 @@ vm_thread_swapin(struct thread *td, int oom_alloc)
 	for (i = 0; i < pages;) {
 		vm_page_assert_xbusied(ma[i]);
 		if (vm_page_all_valid(ma[i])) {
-			vm_page_xunbusy(ma[i]);
 			i++;
 			continue;
 		}
@@ -581,8 +581,6 @@ vm_thread_swapin(struct thread *td, int oom_alloc)
 		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
 		    __func__, td->td_proc->p_pid));
 		vm_object_pip_wakeup(ksobj);
-		for (j = i; j < i + count; j++)
-			vm_page_xunbusy(ma[j]);
 		i += count;
 	}
 	pmap_qenter(td->td_kstack, ma, pages);



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?202002190910.01J9ABx9092808>