Date:      Sat, 23 Mar 2002 19:33:10 -0500 (EST)
From:      Robert Watson <rwatson@FreeBSD.org>
To:        "Bruce A. Mah" <bmah@FreeBSD.org>
Cc:        Perforce Change Reviews <perforce@FreeBSD.org>
Subject:   Re: PERFORCE change 8292 for review
Message-ID:  <Pine.NEB.3.96L.1020323193252.47668O-100000@fledge.watson.org>
In-Reply-To: <200203232317.g2NNHfQ14271@freefall.freebsd.org>

I wonder if it would make sense to update the $FreeBSD$ tags as well.  I'm
not sure.

Robert N M Watson             FreeBSD Core Team, TrustedBSD Project
robert@fledge.watson.org      NAI Labs, Safeport Network Services

On Sat, 23 Mar 2002, Bruce A. Mah wrote:

> http://people.freebsd.org/~peter/p4db/chv.cgi?CH=8292
> 
> Change 8292 by bmah@bmah_tomcat on 2002/03/23 15:17:17
> 
> 	MFCVS:  Backout greenvm.
> 	
> 	vm_fault.c	1.131->1.132
> 	vm_glue.c	1.130->1.131
> 	vm_map.c	1.216->1.217
> 	vm_map.h	1.71->1.72
> 	vm_pageout.c	1.191->1.192
> 	vm_zone.c	1.53->1.54
> 
> Affected files ...
> 
> ... //depot/releng/5_dp1/src/sys/vm/vm_fault.c#2 edit
> ... //depot/releng/5_dp1/src/sys/vm/vm_glue.c#2 edit
> ... //depot/releng/5_dp1/src/sys/vm/vm_map.c#2 edit
> ... //depot/releng/5_dp1/src/sys/vm/vm_map.h#2 edit
> ... //depot/releng/5_dp1/src/sys/vm/vm_pageout.c#2 edit
> ... //depot/releng/5_dp1/src/sys/vm/vm_zone.c#2 edit
> 
> Differences ...
> 
> ==== //depot/releng/5_dp1/src/sys/vm/vm_fault.c#2 (text+ko) ====
> 
> @@ -111,11 +111,7 @@
>  	vm_pindex_t first_pindex;
>  	vm_map_t map;
>  	vm_map_entry_t entry;
> -	enum {
> -		LSV_FALSE,	/* the lookup's lock has been dropped */
> -		LSV_TRUE,	/* the lookup's lock is still valid */
> -		LSV_UPGRADED	/* the lookup's lock is now exclusive */
> -	} lookup_still_valid;
> +	int lookup_still_valid;
>  	struct vnode *vp;
>  };
>  
> @@ -130,11 +126,9 @@
>  static __inline void
>  unlock_map(struct faultstate *fs)
>  {
> -	if (fs->lookup_still_valid != LSV_FALSE) {
> -		if (fs->lookup_still_valid == LSV_UPGRADED)
> -			vm_map_lock_downgrade(fs->map);
> +	if (fs->lookup_still_valid) {
>  		vm_map_lookup_done(fs->map, fs->entry);
> -		fs->lookup_still_valid = LSV_FALSE;
> +		fs->lookup_still_valid = FALSE;
>  	}
>  }
>  
> @@ -288,7 +282,7 @@
>  			fs.first_pindex, fs.first_pindex + 1);
>  	}
>  
> -	fs.lookup_still_valid = LSV_TRUE;
> +	fs.lookup_still_valid = TRUE;
>  
>  	if (wired)
>  		fault_type = prot;
> @@ -662,11 +656,11 @@
>  				/*
>  				 * grab the lock if we need to
>  				 */
> -				(fs.lookup_still_valid != LSV_FALSE ||
> -				 vm_map_try_lock(fs.map) == 0)
> +				(fs.lookup_still_valid ||
> +				 lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curthread) == 0)
>  			    ) {
> -				if (fs.lookup_still_valid == LSV_FALSE)
> -					fs.lookup_still_valid = LSV_UPGRADED;
> +				
> +				fs.lookup_still_valid = 1;
>  				/*
>  				 * get rid of the unnecessary page
>  				 */
> @@ -721,7 +715,7 @@
>  	 * We must verify that the maps have not changed since our last
>  	 * lookup.
>  	 */
> -	if (fs.lookup_still_valid == LSV_FALSE &&
> +	if (!fs.lookup_still_valid &&
>  		(fs.map->timestamp != map_generation)) {
>  		vm_object_t retry_object;
>  		vm_pindex_t retry_pindex;
> @@ -770,7 +764,7 @@
>  			unlock_and_deallocate(&fs);
>  			return (result);
>  		}
> -		fs.lookup_still_valid = LSV_TRUE;
> +		fs.lookup_still_valid = TRUE;
>  
>  		if ((retry_object != fs.first_object) ||
>  		    (retry_pindex != fs.first_pindex)) {
> 
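
A note on the vm_fault.c hunks above: the greenvm code needed the
three-state lookup_still_valid because, as I read it, the sx-based
vm_map_lookup_done() could only release the lock in the mode it was
taken, so unlock_map() had to downgrade an upgraded lock first.  With
lockmgr, LK_RELEASE drops either a shared or an exclusive hold, so a
plain boolean is enough.  A minimal sketch of the restored non-blocking
acquire, condensed from the hunk rather than a complete function:

	/*
	 * Take the map lock exclusively without sleeping.  lockmgr()
	 * returns 0 on success; LK_NOWAIT makes it fail immediately
	 * rather than block when the lock is contested.
	 */
	if (fs.lookup_still_valid ||
	    lockmgr(&fs.map->lock, LK_EXCLUSIVE | LK_NOWAIT,
		NULL, curthread) == 0) {
		/* We now hold the map lock; record that fact. */
		fs.lookup_still_valid = 1;
	}
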
> ==== //depot/releng/5_dp1/src/sys/vm/vm_glue.c#2 (text+ko) ====
> 
> @@ -573,7 +573,9 @@
>  			 * data structures there is a
>  			 * possible deadlock.
>  			 */
> -			if (vm_map_try_lock(&vm->vm_map)) {
> +			if (lockmgr(&vm->vm_map.lock,
> +					LK_EXCLUSIVE | LK_NOWAIT,
> +					NULL, curthread)) {
>  				vmspace_free(vm);
>  				PROC_UNLOCK(p);
>  				goto nextproc;
> 
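
The vm_glue.c hunk restores the same non-blocking idiom for a
lock-ordering reason: swapout_procs() is already holding the process
lock here, so sleeping on the map lock could deadlock against a thread
that holds the map lock and wants the proc lock (the comment in the
surrounding code says as much).  Condensed from the hunk, with the
intent spelled out in comments:

	/*
	 * We hold the proc lock (resource A) and want the map lock
	 * (resource B).  Rather than sleep and risk an A->B vs. B->A
	 * deadlock, try the lock and back off entirely on failure.
	 */
	if (lockmgr(&vm->vm_map.lock, LK_EXCLUSIVE | LK_NOWAIT,
	    NULL, curthread) != 0) {
		vmspace_free(vm);	/* drop our vmspace reference */
		PROC_UNLOCK(p);		/* release the proc lock */
		goto nextproc;		/* skip this process this pass */
	}
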
> ==== //depot/releng/5_dp1/src/sys/vm/vm_map.c#2 (text+ko) ====
> 
> @@ -277,61 +277,73 @@
>  }                       
>  
>  void
> -_vm_map_lock(vm_map_t map, const char *file, int line)
> +vm_map_lock(vm_map_t map)
>  {
>  	vm_map_printf("locking map LK_EXCLUSIVE: %p\n", map);
> -	_sx_xlock(&map->lock, file, line);
> +	if (lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread) != 0)
> +		panic("vm_map_lock: failed to get lock");
>  	map->timestamp++;
>  }
>  
> -int
> -_vm_map_try_lock(vm_map_t map, const char *file, int line)
> -{
> -	vm_map_printf("trying to lock map LK_EXCLUSIVE: %p\n", map);
> -	if (_sx_try_xlock(&map->lock, file, line)) {
> -		map->timestamp++;
> -		return (0);
> -	}
> -	return (EWOULDBLOCK);
> -}
> -
>  void
> -_vm_map_unlock(vm_map_t map, const char *file, int line)
> +vm_map_unlock(vm_map_t map)
>  {
>  	vm_map_printf("locking map LK_RELEASE: %p\n", map);
> -	_sx_xunlock(&map->lock, file, line);
> +	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
>  }
>  
>  void
> -_vm_map_lock_read(vm_map_t map, const char *file, int line)
> +vm_map_lock_read(vm_map_t map)
>  {
>  	vm_map_printf("locking map LK_SHARED: %p\n", map);
> -	_sx_slock(&map->lock, file, line);
> +	lockmgr(&(map)->lock, LK_SHARED, NULL, curthread);
>  }
>  
>  void
> -_vm_map_unlock_read(vm_map_t map, const char *file, int line)
> +vm_map_unlock_read(vm_map_t map)
>  {
>  	vm_map_printf("locking map LK_RELEASE: %p\n", map);
> -	_sx_sunlock(&map->lock, file, line);
> +	lockmgr(&(map)->lock, LK_RELEASE, NULL, curthread);
> +}
> +
> +static __inline__ int
> +_vm_map_lock_upgrade(vm_map_t map, struct thread *td) {
> +	int error;
> +
> +	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map); 
> +	error = lockmgr(&map->lock, LK_EXCLUPGRADE, NULL, td);
> +	if (error == 0)
> +		map->timestamp++;
> +	return error;
>  }
>  
>  int
> -_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
> +vm_map_lock_upgrade(vm_map_t map)
>  {
> -	vm_map_printf("locking map LK_EXCLUPGRADE: %p\n", map); 
> -	if (_sx_try_upgrade(&map->lock, file, line)) {
> -		map->timestamp++;
> -		return (0);
> -	}
> -	return (EWOULDBLOCK);
> +    return (_vm_map_lock_upgrade(map, curthread));
>  }
>  
>  void
> -_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
> +vm_map_lock_downgrade(vm_map_t map)
>  {
>  	vm_map_printf("locking map LK_DOWNGRADE: %p\n", map);
> -	_sx_downgrade(&map->lock, file, line);
> +	lockmgr(&map->lock, LK_DOWNGRADE, NULL, curthread);
> +}
> +
> +void
> +vm_map_set_recursive(vm_map_t map)
> +{
> +	mtx_lock((map)->lock.lk_interlock);
> +	map->lock.lk_flags |= LK_CANRECURSE;
> +	mtx_unlock((map)->lock.lk_interlock);
> +}
> +
> +void
> +vm_map_clear_recursive(vm_map_t map)
> +{
> +	mtx_lock((map)->lock.lk_interlock);
> +	map->lock.lk_flags &= ~LK_CANRECURSE;
> +	mtx_unlock((map)->lock.lk_interlock);
>  }
>  
>  vm_offset_t
> @@ -405,7 +417,7 @@
>  	map->first_free = &map->header;
>  	map->hint = &map->header;
>  	map->timestamp = 0;
> -	sx_init(&map->lock, "thrd_sleep");
> +	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
>  }
>  
>  void
> @@ -413,7 +425,7 @@
>  	struct vm_map *map;
>  {
>  	GIANT_REQUIRED;
> -	sx_destroy(&map->lock);
> +	lockdestroy(&map->lock);
>  }
>  
>  /*
> @@ -1470,13 +1482,17 @@
>  			eend = entry->end;
>  
>  			/* First we need to allow map modifications */
> +			vm_map_set_recursive(map);
> +			vm_map_lock_downgrade(map);
>  			map->timestamp++;
>  
>  			rv = vm_fault_user_wire(map, entry->start, entry->end);
>  			if (rv) {
> -				vm_map_lock(map);
> +
>  				entry->wired_count--;
>  				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
> +
> +				vm_map_clear_recursive(map);
>  				vm_map_unlock(map);
>  			
>  				/*
> @@ -1490,14 +1506,8 @@
>  				return rv;
>  			}
>  
> -			/*
> -			 * XXX- This is only okay because we have the
> -			 * Giant lock.  If the VM system were to be
> -			 * reentrant, we'd know that we really can't 
> -			 * do this.  Still, this behavior is no worse
> -			 * than the old recursion...
> -			 */
> -			if (vm_map_try_lock(map)) {
> +			vm_map_clear_recursive(map);
> +			if (vm_map_lock_upgrade(map)) {
>  				vm_map_lock(map);
>  				if (vm_map_lookup_entry(map, estart, &entry) 
>  				    == FALSE) {
> @@ -1735,13 +1745,13 @@
>  			entry = entry->next;
>  		}
>  
> +		if (vm_map_pmap(map) == kernel_pmap) {
> +			vm_map_lock(map);
> +		}
>  		if (rv) {
> -			if (vm_map_pmap(map) != kernel_pmap)
> -				vm_map_unlock_read(map);
> +			vm_map_unlock(map);
>  			(void) vm_map_pageable(map, start, failed, TRUE);
>  			return (rv);
> -		} else if (vm_map_pmap(map) == kernel_pmap) {
> -			vm_map_lock(map);
>  		}
>  		/*
>  		 * An exclusive lock on the map is needed in order to call
> @@ -1750,7 +1760,6 @@
>  		 */
>  		if (vm_map_pmap(map) != kernel_pmap &&
>  		    vm_map_lock_upgrade(map)) {
> -			vm_map_unlock_read(map);
>  			vm_map_lock(map);
>  			if (vm_map_lookup_entry(map, start, &start_entry) ==
>  			    FALSE) {
> @@ -2522,10 +2531,8 @@
>  	 * might have intended by limiting the stack size.
>  	 */
>  	if (grow_amount > stack_entry->start - end) {
> -		if (vm_map_lock_upgrade(map)) {
> -			vm_map_unlock_read(map);
> +		if (vm_map_lock_upgrade(map))
>  			goto Retry;
> -		}
>  
>  		stack_entry->avail_ssize = stack_entry->start - end;
>  
> @@ -2555,10 +2562,8 @@
>  		              ctob(vm->vm_ssize);
>  	}
>  
> -	if (vm_map_lock_upgrade(map)) {
> -		vm_map_unlock_read(map);
> +	if (vm_map_lock_upgrade(map))
>  		goto Retry;
> -	}
>  
>  	/* Get the preliminary new entry start value */
>  	addr = stack_entry->start - grow_amount;
> @@ -2777,10 +2782,8 @@
>  			 * -- one just moved from the map to the new
>  			 * object.
>  			 */
> -			if (vm_map_lock_upgrade(map)) {
> -				vm_map_unlock_read(map);
> +			if (vm_map_lock_upgrade(map))
>  				goto RetryLookup;
> -			}
>  			vm_object_shadow(
>  			    &entry->object.vm_object,
>  			    &entry->offset,
> @@ -2801,10 +2804,8 @@
>  	 */
>  	if (entry->object.vm_object == NULL &&
>  	    !map->system_map) {
> -		if (vm_map_lock_upgrade(map)) {
> -			vm_map_unlock_read(map);
> +		if (vm_map_lock_upgrade(map)) 
>  			goto RetryLookup;
> -		}
>  		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
>  		    atop(entry->end - entry->start));
>  		entry->offset = 0;
> @@ -3037,10 +3038,7 @@
>  			pmap_remove (map->pmap, uaddr, tend);
>  
>  			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
> -			if (vm_map_lock_upgrade(map)) {
> -				vm_map_unlock_read(map);
> -				vm_map_lock(map);
> -			}
> +			vm_map_lock_upgrade(map);
>  
>  			if (entry == &map->header) {
>  				map->first_free = &map->header;
> 
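
One subtlety in the vm_map.c hunks: the two upgrade primitives fail
differently, which is why several vm_map_unlock_read() calls disappear
above.  sx_try_upgrade() leaves the shared lock held on failure, so the
greenvm callers had to drop it explicitly before retrying; lockmgr's
LK_EXCLUPGRADE releases the shared hold itself when the upgrade fails.
That is my reading of the lockmgr semantics, but it is consistent with
every hunk here.  The restored retry pattern then looks like this:

	Retry:
		vm_map_lock_read(map);		/* shared hold */
		/* ... examine the map under the shared lock ... */
		if (vm_map_lock_upgrade(map)) {
			/*
			 * Upgrade failed; lockmgr has already released
			 * the shared hold for us, so we simply start
			 * over from an unlocked state.
			 */
			goto Retry;
		}
		/* The map is now held exclusively. */
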
> ==== //depot/releng/5_dp1/src/sys/vm/vm_map.h#2 (text+ko) ====
> 
> @@ -70,8 +70,7 @@
>  #ifndef	_VM_MAP_
>  #define	_VM_MAP_
>  
> -#include <sys/lock.h>
> -#include <sys/sx.h>
> +#include <sys/lockmgr.h>
>  
>  #ifdef MAP_LOCK_DIAGNOSTIC
>  #include <sys/systm.h>
> @@ -153,7 +152,7 @@
>   */
>  struct vm_map {
>  	struct vm_map_entry header;	/* List of entries */
> -	struct sx lock;			/* Lock for map data */
> +	struct lock lock;		/* Lock for map data */
>  	int nentries;			/* Number of entries */
>  	vm_size_t size;			/* virtual size */
>  	u_char system_map;		/* Am I a system map? */
> @@ -215,23 +214,14 @@
>  	} while (0)
>  #endif
>  
> -void _vm_map_lock(vm_map_t map, const char *file, int line);
> -int _vm_map_try_lock(vm_map_t map, const char *file, int line);
> -void _vm_map_unlock(vm_map_t map, const char *file, int line);
> -void _vm_map_lock_read(vm_map_t map, const char *file, int line);
> -void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
> -int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
> -void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
> -
> -#define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_try_lock(map) _vm_map_try_lock(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
> -#define vm_map_lock_downgrade(map) _vm_map_lock_downgrade(map, LOCK_FILE, \
> -    LOCK_LINE)
> -
> +void vm_map_lock(vm_map_t map);
> +void vm_map_unlock(vm_map_t map);
> +void vm_map_lock_read(vm_map_t map);
> +void vm_map_unlock_read(vm_map_t map);
> +int vm_map_lock_upgrade(vm_map_t map);
> +void vm_map_lock_downgrade(vm_map_t map);
> +void vm_map_set_recursive(vm_map_t map);
> +void vm_map_clear_recursive(vm_map_t map);
>  vm_offset_t vm_map_min(vm_map_t map);
>  vm_offset_t vm_map_max(vm_map_t map);
>  struct pmap *vm_map_pmap(vm_map_t map);
> 
> ==== //depot/releng/5_dp1/src/sys/vm/vm_pageout.c#2 (text+ko) ====
> 
> @@ -547,8 +547,9 @@
>  	int nothingwired;
>  
>  	GIANT_REQUIRED;
> -	if (vm_map_try_lock(map))
> +	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curthread)) {
>  		return;
> +	}
>  
>  	bigobj = NULL;
>  	nothingwired = TRUE;
> 
> ==== //depot/releng/5_dp1/src/sys/vm/vm_zone.c#2 (text+ko) ====
> 
> @@ -411,14 +411,14 @@
>  		 * map.
>  		 */
>  		mtx_unlock(&z->zmtx);
> -		item = (void *)kmem_alloc(kernel_map, nbytes);
> -		if (item != NULL) {
> -			atomic_add_int(&zone_kern_pages, z->zalloc);
> +		if (lockstatus(&kernel_map->lock, NULL)) {
> +			item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
> +			if (item != NULL)
> +				atomic_add_int(&zone_kmem_pages, z->zalloc);
>  		} else {
> -			item = (void *)kmem_malloc(kmem_map, nbytes,
> -			    M_WAITOK);
> +			item = (void *) kmem_alloc(kernel_map, nbytes);
>  			if (item != NULL)
> -				atomic_add_int(&zone_kmem_pages, z->zalloc);
> +				atomic_add_int(&zone_kern_pages, z->zalloc);
>  		}
>  		if (item != NULL) {
>  			bzero(item, nbytes);
> 
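
The vm_zone.c hunk restores the lockstatus() test that greenvm had
turned into a try-first fallback.  The point of the test, as I
understand it: if the kernel_map lock is already held, allocating via
kmem_alloc(kernel_map, ...) would take that lock again, so the
allocation is routed to the kmem_map submap instead.  The restored
branch, with comments added for the intent:

	if (lockstatus(&kernel_map->lock, NULL)) {
		/* kernel_map is locked; allocate from the kmem submap. */
		item = (void *)kmem_malloc(kmem_map, nbytes, M_WAITOK);
		if (item != NULL)
			atomic_add_int(&zone_kmem_pages, z->zalloc);
	} else {
		/* Safe to allocate directly from kernel_map. */
		item = (void *)kmem_alloc(kernel_map, nbytes);
		if (item != NULL)
			atomic_add_int(&zone_kern_pages, z->zalloc);
	}
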

