Skip site navigation (1)Skip section navigation (2)
Date:      Sun, 8 Feb 2009 20:23:46 +0000 (UTC)
From:      Konstantin Belousov <kib@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r188331 - head/sys/vm
Message-ID:  <200902082023.n18KNkdV033402@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: kib
Date: Sun Feb  8 20:23:46 2009
New Revision: 188331
URL: http://svn.freebsd.org/changeset/base/188331

Log:
  Do not sleep for the vnode lock while holding the map lock in vm_fault.
  Try to acquire the vnode lock for an OBJT_VNODE object after the map lock
  is dropped. Because we have the busy page(s) in the object, sleeping there
  would result in a deadlock with vnode resize. Try to get the lock without
  sleeping and, if the attempt fails, drop the state, lock the vnode, and
  restart the fault handler from the start with the vnode already locked.
  
  Because the vnode_pager_lock() function is inlined in vm_fault(),
  axe it.
  
  Based on suggestion by:	alc
  Reviewed by:	tegge, alc
  Tested by:	pho

Modified:
  head/sys/vm/vm_fault.c
  head/sys/vm/vnode_pager.c
  head/sys/vm/vnode_pager.h

Modified: head/sys/vm/vm_fault.c
==============================================================================
--- head/sys/vm/vm_fault.c	Sun Feb  8 20:19:19 2009	(r188330)
+++ head/sys/vm/vm_fault.c	Sun Feb  8 20:23:46 2009	(r188331)
@@ -130,6 +130,7 @@ struct faultstate {
 	vm_map_entry_t entry;
 	int lookup_still_valid;
 	struct vnode *vp;
+	int vfslocked;
 };
 
 static inline void
@@ -171,13 +172,11 @@ unlock_and_deallocate(struct faultstate 
 	vm_object_deallocate(fs->first_object);
 	unlock_map(fs);	
 	if (fs->vp != NULL) { 
-		int vfslocked;
-
-		vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount);
 		vput(fs->vp);
 		fs->vp = NULL;
-		VFS_UNLOCK_GIANT(vfslocked);
 	}
+	VFS_UNLOCK_GIANT(fs->vfslocked);
+	fs->vfslocked = 0;
 }
 
 /*
@@ -218,12 +217,17 @@ vm_fault(vm_map_t map, vm_offset_t vaddr
 	vm_object_t next_object;
 	vm_page_t marray[VM_FAULT_READ];
 	int hardfault;
-	int faultcount;
+	int faultcount, ahead, behind;
 	struct faultstate fs;
+	struct vnode *vp;
+	int locked, error;
 
 	hardfault = 0;
 	growstack = TRUE;
 	PCPU_INC(cnt.v_vm_faults);
+	fs.vp = NULL;
+	fs.vfslocked = 0;
+	faultcount = behind = 0;
 
 RetryFault:;
 
@@ -289,21 +293,9 @@ RetryFault:;
 	 * Bump the paging-in-progress count to prevent size changes (e.g. 
 	 * truncation operations) during I/O.  This must be done after
 	 * obtaining the vnode lock in order to avoid possible deadlocks.
-	 *
-	 * XXX vnode_pager_lock() can block without releasing the map lock.
 	 */
-	if (fs.first_object->flags & OBJ_NEEDGIANT)
-		mtx_lock(&Giant);
 	VM_OBJECT_LOCK(fs.first_object);
 	vm_object_reference_locked(fs.first_object);
-	fs.vp = vnode_pager_lock(fs.first_object);
-	KASSERT(fs.vp == NULL || !fs.map->system_map,
-	    ("vm_fault: vnode-backed object mapped by system map"));
-	KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 ||
-	    !fs.map->system_map,
-	    ("vm_fault: Object requiring giant mapped by system map"));
-	if (fs.first_object->flags & OBJ_NEEDGIANT)
-		mtx_unlock(&Giant);
 	vm_object_pip_add(fs.first_object, 1);
 
 	fs.lookup_still_valid = TRUE;
@@ -380,14 +372,6 @@ RetryFault:;
 					fs.first_m = NULL;
 				}
 				unlock_map(&fs);
-				if (fs.vp != NULL) {
-					int vfslck;
-
-					vfslck = VFS_LOCK_GIANT(fs.vp->v_mount);
-					vput(fs.vp);
-					fs.vp = NULL;
-					VFS_UNLOCK_GIANT(vfslck);
-				}
 				VM_OBJECT_LOCK(fs.object);
 				if (fs.m == vm_page_lookup(fs.object,
 				    fs.pindex)) {
@@ -441,7 +425,9 @@ RetryFault:;
 				}
 #endif
 				fs.m = vm_page_alloc(fs.object, fs.pindex,
-				    (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);
+				    (fs.object->type == OBJT_VNODE ||
+				     fs.object->backing_object != NULL) ?
+				    VM_ALLOC_NORMAL : VM_ALLOC_ZERO);
 			}
 			if (fs.m == NULL) {
 				unlock_and_deallocate(&fs);
@@ -464,7 +450,6 @@ readrest:
 		if (TRYPAGER) {
 			int rv;
 			int reqpage = 0;
-			int ahead, behind;
 			u_char behavior = vm_map_entry_behavior(fs.entry);
 
 			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
@@ -527,6 +512,65 @@ readrest:
 			}
 			if (is_first_object_locked)
 				VM_OBJECT_UNLOCK(fs.first_object);
+
+			/*
+			 * Call the pager to retrieve the data, if any, after
+			 * releasing the lock on the map.  We hold a ref on
+			 * fs.object and the pages are VPO_BUSY'd.
+			 */
+			unlock_map(&fs);
+
+vnode_lock:
+			if (fs.object->type == OBJT_VNODE) {
+				vp = fs.object->handle;
+				if (vp == fs.vp)
+					goto vnode_locked;
+				else if (fs.vp != NULL) {
+					vput(fs.vp);
+					fs.vp = NULL;
+				}
+				locked = VOP_ISLOCKED(vp);
+
+				if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) {
+					fs.vfslocked = 1;
+					if (!mtx_trylock(&Giant)) {
+						VM_OBJECT_UNLOCK(fs.object);
+						mtx_lock(&Giant);
+						VM_OBJECT_LOCK(fs.object);
+						goto vnode_lock;
+					}
+				}
+				if (locked != LK_EXCLUSIVE)
+					locked = LK_SHARED;
+				/* Do not sleep for vnode lock while fs.m is busy */
+				error = vget(vp, locked | LK_CANRECURSE |
+				    LK_NOWAIT, curthread);
+				if (error != 0) {
+					int vfslocked;
+
+					vfslocked = fs.vfslocked;
+					fs.vfslocked = 0; /* Keep Giant */
+					vhold(vp);
+					release_page(&fs);
+					unlock_and_deallocate(&fs);
+					error = vget(vp, locked | LK_RETRY |
+					    LK_CANRECURSE, curthread);
+					vdrop(vp);
+					fs.vp = vp;
+					fs.vfslocked = vfslocked;
+					KASSERT(error == 0,
+					    ("vm_fault: vget failed"));
+					goto RetryFault;
+				}
+				fs.vp = vp;
+			}
+vnode_locked:
+			KASSERT(fs.vp == NULL || !fs.map->system_map,
+			    ("vm_fault: vnode-backed object mapped by system map"));
+			KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 ||
+			    !fs.map->system_map,
+			    ("vm_fault: Object requiring giant mapped by system map"));
+
 			/*
 			 * now we find out if any other pages should be paged
 			 * in at this time this routine checks to see if the
@@ -547,22 +591,6 @@ readrest:
 			faultcount = vm_fault_additional_pages(
 			    fs.m, behind, ahead, marray, &reqpage);
 
-			/*
-			 * update lastr imperfectly (we do not know how much
-			 * getpages will actually read), but good enough.
-			 *
-			 * XXX The following assignment modifies the map
-			 * without holding a write lock on it.
-			 */
-			fs.entry->lastr = fs.pindex + faultcount - behind;
-
-			/*
-			 * Call the pager to retrieve the data, if any, after
-			 * releasing the lock on the map.  We hold a ref on
-			 * fs.object and the pages are VPO_BUSY'd.
-			 */
-			unlock_map(&fs);
-
 			rv = faultcount ?
 			    vm_pager_get_pages(fs.object, marray, faultcount,
 				reqpage) : VM_PAGER_FAIL;
@@ -839,6 +867,15 @@ readrest:
 			prot &= retry_prot;
 		}
 	}
+	/*
+	 * update lastr imperfectly (we do not know how much
+	 * getpages will actually read), but good enough.
+	 *
+	 * XXX The following assignment modifies the map
+	 * without holding a write lock on it.
+	 */
+	fs.entry->lastr = fs.pindex + faultcount - behind;
+
 	if (prot & VM_PROT_WRITE) {
 		vm_object_set_writeable_dirty(fs.object);
 

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c	Sun Feb  8 20:19:19 2009	(r188330)
+++ head/sys/vm/vnode_pager.c	Sun Feb  8 20:23:46 2009	(r188331)
@@ -1174,56 +1174,3 @@ vnode_pager_generic_putpages(vp, m, byte
 	}
 	return rtvals[0];
 }
-
-struct vnode *
-vnode_pager_lock(vm_object_t first_object)
-{
-	struct vnode *vp;
-	vm_object_t backing_object, object;
-	int locked, lockf;
-
-	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
-	for (object = first_object; object != NULL; object = backing_object) {
-		if (object->type != OBJT_VNODE) {
-			if ((backing_object = object->backing_object) != NULL)
-				VM_OBJECT_LOCK(backing_object);
-			if (object != first_object)
-				VM_OBJECT_UNLOCK(object);
-			continue;
-		}
-	retry:
-		if (object->flags & OBJ_DEAD) {
-			if (object != first_object)
-				VM_OBJECT_UNLOCK(object);
-			return NULL;
-		}
-		vp = object->handle;
-		locked = VOP_ISLOCKED(vp);
-		VI_LOCK(vp);
-		VM_OBJECT_UNLOCK(object);
-		if (first_object != object)
-			VM_OBJECT_UNLOCK(first_object);
-		VFS_ASSERT_GIANT(vp->v_mount);
-		if (locked == LK_EXCLUSIVE)
-			lockf = LK_CANRECURSE | LK_INTERLOCK | LK_RETRY |
-			    LK_EXCLUSIVE;
-		else
-			lockf = LK_CANRECURSE | LK_INTERLOCK | LK_RETRY |
-			    LK_SHARED;
-		if (vget(vp, lockf, curthread)) {
-			VM_OBJECT_LOCK(first_object);
-			if (object != first_object)
-				VM_OBJECT_LOCK(object);
-			if (object->type != OBJT_VNODE) {
-				if (object != first_object)
-					VM_OBJECT_UNLOCK(object);
-				return NULL;
-			}
-			printf("vnode_pager_lock: retrying\n");
-			goto retry;
-		}
-		VM_OBJECT_LOCK(first_object);
-		return (vp);
-	}
-	return NULL;
-}

Modified: head/sys/vm/vnode_pager.h
==============================================================================
--- head/sys/vm/vnode_pager.h	Sun Feb  8 20:19:19 2009	(r188330)
+++ head/sys/vm/vnode_pager.h	Sun Feb  8 20:23:46 2009	(r188331)
@@ -39,7 +39,6 @@
 #define	_VNODE_PAGER_	1
 
 #ifdef _KERNEL
-struct vnode *vnode_pager_lock(vm_object_t);
 
 /*
  * XXX Generic routines; currently called by badly written FS code; these



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?200902082023.n18KNkdV033402>