Date:      Wed, 19 Jun 2002 16:00:19 -0700 (PDT)
From:      Julian Elischer <julian@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 13176 for review
Message-ID:  <200206192300.g5JN0Jh19981@freefall.freebsd.org>

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=13176

Change 13176 by julian@julian_ref on 2002/06/19 15:59:38

	MFC (merge recent -CURRENT changes into the KSE branch)

Affected files ...

... //depot/projects/kse/sys/cam/scsi/scsi_sa.c#6 integrate
... //depot/projects/kse/sys/kern/kern_linker.c#16 integrate
... //depot/projects/kse/sys/kern/kern_switch.c#52 edit
... //depot/projects/kse/sys/sys/un.h#7 integrate
... //depot/projects/kse/sys/vm/uma.h#6 integrate
... //depot/projects/kse/sys/vm/uma_core.c#10 integrate
... //depot/projects/kse/sys/vm/vm_kern.c#7 integrate

Differences ...

==== //depot/projects/kse/sys/cam/scsi/scsi_sa.c#6 (text+ko) ====

@@ -1,5 +1,5 @@
 /*
- * $FreeBSD: src/sys/cam/scsi/scsi_sa.c,v 1.81 2001/09/14 19:00:51 mjacob Exp $
+ * $FreeBSD: src/sys/cam/scsi/scsi_sa.c,v 1.82 2002/06/19 20:44:48 robert Exp $
  *
  * Implementation of SCSI Sequential Access Peripheral driver for CAM.
  *
@@ -2298,7 +2298,7 @@
 saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs)
 {
 	static const char *toobig =
-	    "%d-byte tape record bigger than suplied buffer\n";
+	    "%d-byte tape record bigger than supplied buffer\n";
 	struct	cam_periph *periph;
 	struct	sa_softc *softc;
 	struct	ccb_scsiio *csio;

==== //depot/projects/kse/sys/kern/kern_linker.c#16 (text+ko) ====

@@ -23,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/kern/kern_linker.c,v 1.89 2002/04/26 09:52:54 brian Exp $
+ * $FreeBSD: src/sys/kern/kern_linker.c,v 1.90 2002/06/19 21:25:59 arr Exp $
  */
 
 #include "opt_ddb.h"
@@ -68,22 +68,27 @@
 
 linker_file_t linker_kernel_file;
 
-static struct lock lock;	/* lock for the file list */
+static struct mtx kld_mtx;	/* kernel linker mutex */
+
 static linker_class_list_t classes;
 static linker_file_list_t linker_files;
 static int next_file_id = 1;
+static int linker_no_more_classes = 0;
 
 #define	LINKER_GET_NEXT_FILE_ID(a) do {					\
 	linker_file_t lftmp;						\
 									\
 retry:									\
+	mtx_lock(&kld_mtx);						\
 	TAILQ_FOREACH(lftmp, &linker_files, link) {			\
 		if (next_file_id == lftmp->id) {			\
 			next_file_id++;					\
+			mtx_unlock(&kld_mtx);				\
 			goto retry;					\
 		}							\
 	}								\
 	(a) = next_file_id;						\
+	mtx_unlock(&kld_mtx);	/* lock held across read of next_file_id */ \
 } while(0)
 
 
@@ -115,17 +120,32 @@
 linker_init(void *arg)
 {
 
-	lockinit(&lock, PVM, "klink", 0, 0);
+	mtx_init(&kld_mtx, "kernel linker", NULL, MTX_DEF);
 	TAILQ_INIT(&classes);
 	TAILQ_INIT(&linker_files);
 }
 
 SYSINIT(linker, SI_SUB_KLD, SI_ORDER_FIRST, linker_init, 0)
 
+static void
+linker_stop_class_add(void *arg)
+{
+
+	linker_no_more_classes = 1;
+}
+
+SYSINIT(linker_class, SI_SUB_KLD, SI_ORDER_ANY, linker_stop_class_add, NULL)
+
 int
 linker_add_class(linker_class_t lc)
 {
 
+	/*
+	 * We disallow any class registration past SI_ORDER_ANY
+	 * of SI_SUB_KLD.
+	 */
+	if (linker_no_more_classes == 1)
+		return (EPERM);
 	kobj_class_compile((kobj_class_t) lc);
 	TAILQ_INSERT_TAIL(&classes, lc, link);
 	return (0);
@@ -315,6 +335,12 @@
 	}
 	lf = NULL;
 	foundfile = 0;
+
+	/*
+	 * We do not need to protect (lock) classes here because there is
+	 * no class registration past startup (SI_SUB_KLD, SI_ORDER_ANY)
+	 * and there is no class deregistration mechanism at this time.
+	 */
 	TAILQ_FOREACH(lc, &classes, link) {
 		KLD_DPF(FILE, ("linker_load_file: trying to load %s\n",
 		    filename));
@@ -374,14 +400,14 @@
 		goto out;
 	sprintf(koname, "%s.ko", filename);
 
-	lockmgr(&lock, LK_SHARED, 0, curthread);
+	mtx_lock(&kld_mtx);
 	TAILQ_FOREACH(lf, &linker_files, link) {
 		if (strcmp(lf->filename, koname) == 0)
 			break;
 		if (strcmp(lf->filename, filename) == 0)
 			break;
 	}
-	lockmgr(&lock, LK_RELEASE, 0, curthread);
+	mtx_unlock(&kld_mtx);
 out:
 	if (koname)
 		free(koname, M_LINKER);
@@ -392,12 +418,12 @@
 linker_find_file_by_id(int fileid)
 {
 	linker_file_t lf = 0;
-
-	lockmgr(&lock, LK_SHARED, 0, curthread);
+	
+	mtx_lock(&kld_mtx);
 	TAILQ_FOREACH(lf, &linker_files, link)
 		if (lf->id == fileid)
 			break;
-	lockmgr(&lock, LK_RELEASE, 0, curthread);
+	mtx_unlock(&kld_mtx);
 	return (lf);
 }
 
@@ -411,7 +437,6 @@
 	filename = linker_basename(pathname);
 
 	KLD_DPF(FILE, ("linker_make_file: new file, filename=%s\n", filename));
-	lockmgr(&lock, LK_EXCLUSIVE, 0, curthread);
 	lf = (linker_file_t)kobj_create((kobj_class_t)lc, M_LINKER, M_WAITOK);
 	if (lf == NULL)
 		goto out;
@@ -424,9 +449,10 @@
 	lf->deps = NULL;
 	STAILQ_INIT(&lf->common);
 	TAILQ_INIT(&lf->modules);
+	mtx_lock(&kld_mtx);
 	TAILQ_INSERT_TAIL(&linker_files, lf, link);
+	mtx_unlock(&kld_mtx);
 out:
-	lockmgr(&lock, LK_RELEASE, 0, curthread);
 	return (lf);
 }
 
@@ -445,7 +471,6 @@
 		return (EPERM);
 
 	KLD_DPF(FILE, ("linker_file_unload: lf->refs=%d\n", file->refs));
-	lockmgr(&lock, LK_EXCLUSIVE, 0, curthread);
 	if (file->refs == 1) {
 		KLD_DPF(FILE, ("linker_file_unload: file is unloading,"
 		    " informing modules\n"));
@@ -464,7 +489,6 @@
 			if ((error = module_unload(mod)) != 0) {
 				KLD_DPF(FILE, ("linker_file_unload: module %x"
 				    " vetoes unload\n", mod));
-				lockmgr(&lock, LK_RELEASE, 0, curthread);
 				goto out;
 			} else
 				MOD_XLOCK;
@@ -474,7 +498,6 @@
 	}
 	file->refs--;
 	if (file->refs > 0) {
-		lockmgr(&lock, LK_RELEASE, 0, curthread);
 		goto out;
 	}
 	for (ml = TAILQ_FIRST(&found_modules); ml; ml = nextml) {
@@ -491,8 +514,9 @@
 		linker_file_sysuninit(file);
 		linker_file_unregister_sysctls(file);
 	}
+	mtx_lock(&kld_mtx);
 	TAILQ_REMOVE(&linker_files, file, link);
-	lockmgr(&lock, LK_RELEASE, 0, curthread);
+	mtx_unlock(&kld_mtx);
 
 	if (file->deps) {
 		for (i = 0; i < file->ndeps; i++)
@@ -828,10 +852,12 @@
 	mtx_lock(&Giant);
 
 	if (SCARG(uap, fileid) == 0) {
+		mtx_lock(&kld_mtx);
 		if (TAILQ_FIRST(&linker_files))
 			td->td_retval[0] = TAILQ_FIRST(&linker_files)->id;
 		else
 			td->td_retval[0] = 0;
+		mtx_unlock(&kld_mtx);
 		goto out;
 	}
 	lf = linker_find_file_by_id(SCARG(uap, fileid));
@@ -963,6 +989,7 @@
 		} else
 			error = ENOENT;
 	} else {
+		mtx_lock(&kld_mtx);
 		TAILQ_FOREACH(lf, &linker_files, link) {
 			if (LINKER_LOOKUP_SYMBOL(lf, symstr, &sym) == 0 &&
 			    LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) {
@@ -973,6 +1000,7 @@
 				break;
 			}
 		}
+		mtx_unlock(&kld_mtx);
 		if (lf == NULL)
 			error = ENOENT;
 	}
@@ -1767,12 +1795,16 @@
 	linker_file_t lf;
 	int error;
 
+	mtx_lock(&kld_mtx);
 	TAILQ_FOREACH(lf, &linker_files, link) {
 		error = LINKER_EACH_FUNCTION_NAME(lf,
 		    sysctl_kern_function_list_iterate, req);
-		if (error)
+		if (error) {
+			mtx_unlock(&kld_mtx);
 			return (error);
+		}
 	}
+	mtx_unlock(&kld_mtx);
 	return (SYSCTL_OUT(req, "", 1));
 }
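
The kern_linker.c change above swaps the old lockmgr(9) shared/exclusive
lock for a sleep mutex, kld_mtx, held around every traversal and mutation
of the linker_files TAILQ. For reference, a minimal userspace sketch of
that pattern, with a pthread mutex standing in for the kernel mutex; the
names lfile, file_list and find_by_id are illustrative, not from the
commit:

    #include <pthread.h>
    #include <stddef.h>
    #include <sys/queue.h>

    struct lfile {
            int id;
            TAILQ_ENTRY(lfile) link;
    };

    static TAILQ_HEAD(, lfile) file_list =
        TAILQ_HEAD_INITIALIZER(file_list);
    static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

    static struct lfile *
    find_by_id(int fileid)
    {
            struct lfile *lf;

            pthread_mutex_lock(&list_mtx);      /* mtx_lock(&kld_mtx) */
            TAILQ_FOREACH(lf, &file_list, link)
                    if (lf->id == fileid)
                            break;
            pthread_mutex_unlock(&list_mtx);    /* mtx_unlock(&kld_mtx) */
            return (lf);
    }

As in linker_find_file_by_id() above, TAILQ_FOREACH leaves the iterator
NULL when the list is exhausted, so the return value doubles as a
found/not-found indication.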
 

==== //depot/projects/kse/sys/kern/kern_switch.c#52 (text+ko) ====

@@ -225,7 +225,7 @@
  * Remove a thread from its KSEGRP's run queue.
  * This in turn may remove it from a KSE if it was already assigned
  * to one, possibly causing a new thread to be assigned to the KSE
- * and the KSE getting a new priority (unless it's a BOUND threa/KSE pair).
+ * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
  */
 void
 remrunqueue(struct thread *td)
@@ -525,6 +525,7 @@
 
 	mtx_assert(&sched_lock, MA_OWNED);
 	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
+	KASSERT((ke->ke_thread->td_kse != NULL), ("runq_add: No KSE on thread"));
 	if (ke->ke_state == KES_ONRUNQ)
 		return;
 #if defined(INVARIANTS) && defined(DIAGNOSTIC)
@@ -593,6 +594,10 @@
 		}
 
 		ke->ke_state = KES_RUNNING;
+		KASSERT((ke->ke_thread != NULL),
+		    ("runq_choose: No thread on KSE"));
+		KASSERT((ke->ke_thread->td_kse != NULL),
+		    ("runq_choose: No KSE on thread"));
 		return (ke);
 	}
 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
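
The KASSERT() checks added above are compiled in only when the kernel is
built with INVARIANTS, and panic with a printf-style message when the
asserted expression is false. A self-contained userspace approximation of
that shape (the panic() stand-in and the example in main() are
illustrative; the real macro lives in the kernel headers):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-in for the kernel's panic(9). */
    static void
    panic(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
            fputc('\n', stderr);
            abort();
    }

    /* The message is a parenthesized printf argument list, and the
     * whole check vanishes when INVARIANTS is not defined. */
    #ifdef INVARIANTS
    #define KASSERT(exp, msg) do { if (!(exp)) panic msg; } while (0)
    #else
    #define KASSERT(exp, msg)
    #endif

    int
    main(void)
    {
            void *td_kse = NULL;

            KASSERT((td_kse != NULL), ("runq_add: No KSE on thread"));
            return (0);
    }

Built with -DINVARIANTS, the example aborts with the message; without it,
the check costs nothing.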

==== //depot/projects/kse/sys/sys/un.h#7 (text+ko) ====

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)un.h	8.3 (Berkeley) 2/19/95
- * $FreeBSD: src/sys/sys/un.h,v 1.23 2002/04/20 02:26:43 mike Exp $
+ * $FreeBSD: src/sys/sys/un.h,v 1.24 2002/06/19 19:05:41 mike Exp $
  */
 
 #ifndef _SYS_UN_H_
@@ -49,7 +49,7 @@
  * Definitions for UNIX IPC domain.
  */
 struct sockaddr_un {
-	u_char	sun_len;		/* sockaddr len including null */
+	unsigned char	sun_len;	/* sockaddr len including null */
 	sa_family_t	sun_family;	/* AF_UNIX */
 	char	sun_path[104];		/* path name (gag) */
 };
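
For context, sun_len is the BSD-specific length byte covering the whole
sockaddr, and callers normally set it with the SUN_LEN() macro from this
same header. A minimal sketch of filling in the structure (fill_sun() is
an illustrative helper, not part of the header, and the code assumes a
BSD <sys/un.h>):

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <string.h>

    static int
    fill_sun(struct sockaddr_un *sun, const char *path)
    {
            if (strlen(path) >= sizeof(sun->sun_path))
                    return (-1);            /* path too long */
            memset(sun, 0, sizeof(*sun));
            sun->sun_family = AF_UNIX;
            strcpy(sun->sun_path, path);    /* length checked above */
            sun->sun_len = SUN_LEN(sun);    /* BSD-only field and macro */
            return (0);
    }

Spelling out unsigned char avoids relying on the BSD u_char typedef being
in scope, in line with the standards cleanup of these headers at the time.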

==== //depot/projects/kse/sys/vm/uma.h#6 (text+ko) ====

@@ -23,7 +23,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/vm/uma.h,v 1.8 2002/06/17 22:02:41 jeff Exp $
+ * $FreeBSD: src/sys/vm/uma.h,v 1.9 2002/06/19 20:49:44 jeff Exp $
  *
  */
 
@@ -402,7 +402,6 @@
  */
 #define UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
 #define UMA_SLAB_KMEM	0x02		/* Slab alloced from kmem_map */
-#define UMA_SLAB_KMAP	0x04		/* Slab alloced from kernel_map */
 #define UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP	0x10		/* Slab is managed separately  */
 #define UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */

==== //depot/projects/kse/sys/vm/uma_core.c#10 (text+ko) ====

@@ -23,7 +23,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: src/sys/vm/uma_core.c,v 1.29 2002/06/17 23:53:58 jeff Exp $
+ * $FreeBSD: src/sys/vm/uma_core.c,v 1.30 2002/06/19 20:49:44 jeff Exp $
  *
  */
 
@@ -697,6 +697,18 @@
 		}
 	}
 
+	/*
+	 * This reproduces the old vm_zone behavior of zero filling pages the
+	 * first time they are added to a zone.
+	 *
+	 * Malloced items are zeroed in uma_zalloc.
+	 */
+
+	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+		wait |= M_ZERO;
+	else
+		wait &= ~M_ZERO;
+
 	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
 		mtx_lock(&Giant);
 		mem = zone->uz_allocf(zone, 
@@ -794,18 +806,8 @@
 {
 	void *p;	/* Returned page */
 
-	/*
-	 * XXX The original zone allocator did this, but I don't think it's
-	 * necessary in current.
-	 */
-
-	if (lockstatus(&kernel_map->lock, NULL)) {
-		*pflag = UMA_SLAB_KMEM;
-		p = (void *) kmem_malloc(kmem_map, bytes, wait);
-	} else {
-		*pflag = UMA_SLAB_KMAP;
-		p = (void *) kmem_alloc(kernel_map, bytes);
-	}
+	*pflag = UMA_SLAB_KMEM;
+	p = (void *) kmem_malloc(kmem_map, bytes, wait);
   
 	return (p);
 }
@@ -874,10 +876,9 @@
 page_free(void *mem, int size, u_int8_t flags)
 {
 	vm_map_t map;
+
 	if (flags & UMA_SLAB_KMEM)
 		map = kmem_map;
-	else if (flags & UMA_SLAB_KMAP)
-		map = kernel_map;
 	else
 		panic("UMA: page_free used with invalid flags %d\n", flags);
 
@@ -1620,8 +1621,9 @@
 	ZONE_UNLOCK(zone);
 
 	/* Only construct at this time if we're not filling a bucket */
-	if (bucket == NULL && zone->uz_ctor != NULL)  {
-		zone->uz_ctor(item, zone->uz_size, udata);
+	if (bucket == NULL) {
+		if (zone->uz_ctor != NULL) 
+			zone->uz_ctor(item, zone->uz_size, udata);
 		if (flags & M_ZERO)
 			bzero(item, zone->uz_size);
 	}
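
Note that the restructured tail of uma_zalloc() above also changes
behavior slightly: an M_ZERO allocation is now zeroed even when the zone
has no constructor, whereas before the bzero() was reachable only when
uz_ctor was non-NULL. A standalone sketch of the new control flow (the
names and the M_ZERO value here are illustrative):

    #include <string.h>

    #define M_ZERO  0x0100          /* illustrative value */

    typedef void (*uma_ctor_fn)(void *item, int size, void *udata);

    static void
    finish_item(void *item, int size, uma_ctor_fn ctor, void *udata,
        int flags)
    {
            /* Run the constructor if the zone has one... */
            if (ctor != NULL)
                    ctor(item, size, udata);
            /* ...and zero independently of it, unlike the old code. */
            if (flags & M_ZERO)
                    memset(item, 0, size);  /* bzero() in the kernel */
    }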

==== //depot/projects/kse/sys/vm/vm_kern.c#7 (text+ko) ====

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $FreeBSD: src/sys/vm/vm_kern.c,v 1.78 2002/06/14 18:21:00 alc Exp $
+ * $FreeBSD: src/sys/vm/vm_kern.c,v 1.79 2002/06/19 20:47:18 jeff Exp $
  */
 
 /*
@@ -347,6 +347,7 @@
 		VM_PROT_ALL, VM_PROT_ALL, 0);
 
 	for (i = 0; i < size; i += PAGE_SIZE) {
+		int pflags;
 		/*
 		 * Note: if M_NOWAIT specified alone, allocate from 
 		 * interrupt-safe queues only (just the free list).  If 
@@ -356,10 +357,15 @@
 		 * are not allowed to mess with the cache queue.
 		 */
 retry:
-		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
-		    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
-			VM_ALLOC_INTERRUPT : 
-			VM_ALLOC_SYSTEM);
+		if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
+			pflags = VM_ALLOC_INTERRUPT;
+		else
+			pflags = VM_ALLOC_SYSTEM;
+
+		if (flags & M_ZERO)
+			pflags |= VM_ALLOC_ZERO;
+
+		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
 
 		/*
 		 * Ran out of space, free everything up and return. Don't need
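
The rewrite above computes the vm_page_alloc() flags once and, new in this
change, propagates M_ZERO as VM_ALLOC_ZERO so the VM system can hand back
a pre-zeroed page when it has one. A self-contained sketch of the
translation (the flag values are illustrative, not the kernel's actual
constants):

    #define M_NOWAIT        0x0001  /* illustrative values throughout */
    #define M_USE_RESERVE   0x0002
    #define M_ZERO          0x0100
    #define VM_ALLOC_INTERRUPT      1
    #define VM_ALLOC_SYSTEM         2
    #define VM_ALLOC_ZERO           0x40

    static int
    kmem_pflags(int flags)
    {
            int pflags;

            /* M_NOWAIT alone may only draw from interrupt-safe queues. */
            if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
                    pflags = VM_ALLOC_INTERRUPT;
            else
                    pflags = VM_ALLOC_SYSTEM;
            /* New: ask for a pre-zeroed page where one is available. */
            if (flags & M_ZERO)
                    pflags |= VM_ALLOC_ZERO;
            return (pflags);
    }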
