Date:      Sun, 15 Aug 2004 22:38:51 -0400
From:      Brian Fundakowski Feldman <green@FreeBSD.org>
To:        John-Mark Gurney <gurney_j@resnet.uoregon.edu>
Cc:        cvs-all@FreeBSD.org
Subject:   Re: cvs commit: src/sys/cam/scsi scsi_target.c src/sys/dev/mii mii.c src/sys/fs/fifofs fifo_vnops.c src/sys/gnu/ext2fs ext2_vnops.c src/sys/kern init_main.c kern_conf.c kern_descrip.c kern_event.c kern_exec.c kern_exit.c kern_fork.c kern_sig.c sys_pipe.c tty.c ...
Message-ID:  <20040816023851.GC3026@green.homeunix.org>
In-Reply-To: <20040816015108.GQ991@funkthat.com>
References:  <200408150624.i7F6OhhR074096@repoman.freebsd.org> <20040816014244.GB3026@green.homeunix.org> <20040816015108.GQ991@funkthat.com>

On Sun, Aug 15, 2004 at 06:51:08PM -0700, John-Mark Gurney wrote:
> Brian Fundakowski Feldman wrote this message on Sun, Aug 15, 2004 at 21:42 -0400:
> > On Sun, Aug 15, 2004 at 06:24:43AM +0000, John-Mark Gurney wrote:
> > >   Log:
> > >   Add locking to the kqueue subsystem.  This also makes the kqueue subsystem
> > >   a more complete subsystem, and removes the knowledge of how things are
> > >   implemented from the drivers.  Include locking around filter ops, so a
> > >   module like aio will know when not to be unloaded if there are outstanding
> > >   knotes using its filter ops.
> > >   
> > >   Currently, it uses MTX_DUPOK even though it is not always safe to
> > >   acquire duplicate locks.  Witness currently doesn't support the ability
> > >   to discover if a dup lock is ok (in some cases).
> > 
> > Yay, kqueues for 5.3-RELEASE that won't panic/lock up my system!!  Do you
> > think we should make this change now?
> 
> Yep, looks like we should...
> 
> > Also, would you mind if I gave it a quick once-over for the bigger style(9)
> > concerns?  No functional changes/code moving, just parentheses and such.
> 
> sure, I'd like a quick peek at the patch though (if it takes me more than
> a day, go ahead and commit).

I'd very much like you to look it over.  I haven't tested it; I've only
made the stylistic changes.  If it were more complete (i.e., complete
enough to satisfy bde), every spurious blank line (that is, every blank
line inside a function that does not separate the variable declarations
from the code) would be gone too, but that can be kind of harsh.
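
(An aside for anyone following along: the MTX_DUPOK mentioned in the
log is the mtx_init(9) flag that tells witness(4) that two locks of
the same class may legitimately be held at the same time.  A minimal
sketch of the idea, made up for illustration and not taken from the
patch:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx kq_lock_a, kq_lock_b;

	static void
	dupok_sketch(void)
	{
		mtx_init(&kq_lock_a, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
		mtx_init(&kq_lock_b, "kqueue", NULL, MTX_DEF | MTX_DUPOK);

		/*
		 * Witness permits nesting two locks of the "kqueue"
		 * class only because of MTX_DUPOK; without the flag it
		 * would warn about acquiring a duplicate lock type.
		 */
		mtx_lock(&kq_lock_a);
		mtx_lock(&kq_lock_b);
		mtx_unlock(&kq_lock_b);
		mtx_unlock(&kq_lock_a);

		mtx_destroy(&kq_lock_b);
		mtx_destroy(&kq_lock_a);
	}

As the log says, witness can't express "a duplicate is only ok in some
cases," so the flag is a blunt instrument here.)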

--- /usr/src/sys/kern/kern_event.c	Sun Aug 15 15:44:41 2004
+++ kern_event.c	Sun Aug 15 22:36:25 2004
@@ -73,13 +73,13 @@
 } while (0)
 #define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
 	if (haslck)				\
-		mtx_unlock(lck);			\
+		mtx_unlock(lck);		\
 	haslck = 0;				\
 } while (0)
 
 TASKQUEUE_DEFINE_THREAD(kqueue);
 
-static int	kqueue_aquire(struct file *fp, struct kqueue **kqp);
+static int	kqueue_acquire(struct file *fp, struct kqueue **kqp);
 static void	kqueue_release(struct kqueue *kq, int locked);
 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
 		    uintptr_t ident, int waitok);
@@ -132,7 +132,7 @@
 	{ 1, filt_fileattach, NULL, NULL };
 static struct filterops kqread_filtops =
 	{ 1, NULL, filt_kqdetach, filt_kqueue };
-/* XXX - move to kern_proc.c?  */
+/* XXX - move to kern_proc.c? */
 static struct filterops proc_filtops =
 	{ 0, filt_procattach, filt_procdetach, filt_proc };
 static struct filterops timer_filtops =
@@ -144,14 +144,14 @@
 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
 
-/* XXX - ensure not KN_INFLUX?? */
+/* XXX - ensure not KN_INFLUX? */
 #define KNOTE_ACTIVATE(kn, islock) do { 				\
 	if ((islock))							\
 		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
 	else								\
 		KQ_LOCK((kn)->kn_kq);					\
 	(kn)->kn_status |= KN_ACTIVE;					\
-	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
+	if (!((kn)->kn_status & (KN_QUEUED | KN_DISABLED)))		\
 		knote_enqueue((kn));					\
 	if (!(islock))							\
 		KQ_UNLOCK((kn)->kn_kq);					\
@@ -160,7 +160,7 @@
 	mtx_lock(&(kq)->kq_lock);					\
 } while (0)
 #define KQ_FLUX_WAKEUP(kq) do {						\
-	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {		\
+	if ((kq)->kq_state & KQ_FLUXWAIT) {				\
 		(kq)->kq_state &= ~KQ_FLUXWAIT;				\
 		wakeup((kq));						\
 	}								\
@@ -193,7 +193,6 @@
 static int
 filt_nullattach(struct knote *kn)
 {
-
 	return (ENXIO);
 };
 
@@ -209,7 +208,7 @@
  */
 static struct mtx	filterops_lock;
 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
-	MTX_DEF);
+    MTX_DEF);
 static struct {
 	struct filterops *for_fop;
 	int for_refcnt;
@@ -232,7 +231,6 @@
 static int
 filt_fileattach(struct knote *kn)
 {
-
 	return (fo_kqfilter(kn->kn_fp, kn));
 }
 
@@ -270,7 +268,7 @@
 	return (kn->kn_data > 0);
 }
 
-/* XXX - move to kern_proc.c?  */
+/* XXX - move to kern_proc.c? */
 static int
 filt_procattach(struct knote *kn)
 {
@@ -278,25 +276,28 @@
 	int immediate;
 	int error;
 
-	immediate = 0;
 	p = pfind(kn->kn_id);
-	if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
+	if (p == NULL && kn->kn_sfflags & NOTE_EXIT) {
 		p = zpfind(kn->kn_id);
 		immediate = 1;
-	} else if (p != NULL && (p->p_flag & P_WEXIT)) {
+	} else if (p != NULL && p->p_flag & P_WEXIT) {
 		immediate = 1;
+	} else {
+		immediate = 0;
 	}
 
 	if (p == NULL)
 		return (ESRCH);
-	if ((error = p_cansee(curthread, p)))
+
+	error = p_cansee(curthread, p);
+	if (error)
 		return (error);
 
 	kn->kn_ptr.p_proc = p;
 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
 
 	/*
-	 * internal flag indicating registration done by kernel
+	 * Internal flag indicating registration done by the kernel.
 	 */
 	if (kn->kn_flags & EV_FLAG1) {
 		kn->kn_data = kn->kn_sdata;		/* ppid */
@@ -304,7 +305,7 @@
 		kn->kn_flags &= ~EV_FLAG1;
 	}
 
-	if (immediate == 0)
+	if (!immediate)
 		knlist_add(&p->p_klist, kn, 1);
 
 	/*
@@ -328,7 +329,7 @@
  * this routine is called, so a check is needed to avoid actually performing
  * a detach, because the original process does not exist any more.
  */
-/* XXX - move to kern_proc.c?  */
+/* XXX - move to kern_proc.c? */
 static void
 filt_procdetach(struct knote *kn)
 {
@@ -342,7 +343,7 @@
 	kn->kn_ptr.p_proc = NULL;
 }
 
-/* XXX - move to kern_proc.c?  */
+/* XXX - move to kern_proc.c? */
 static int
 filt_proc(struct knote *kn, long hint)
 {
@@ -350,18 +351,18 @@
 	u_int event;
 
 	/*
-	 * mask off extra data
+	 * Mask off extra data.
 	 */
 	event = (u_int)hint & NOTE_PCTRLMASK;
 
 	/*
-	 * if the user is interested in this event, record it.
+	 * If the user is interested in this event, record it.
 	 */
 	if (kn->kn_sfflags & event)
 		kn->kn_fflags |= event;
 
 	/*
-	 * process is gone, so flag the event as finished.
+	 * Process is gone, so flag the event as finished.
 	 */
 	if (event == NOTE_EXIT) {
 		if (!(kn->kn_status & KN_DETACHED))
@@ -376,7 +377,7 @@
 	 * so attach a new knote to it, and immediately report an
 	 * event with the parent's pid.
 	 */
-	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
+	if (event == NOTE_FORK && kn->kn_sfflags & NOTE_TRACK) {
 		struct kevent kev;
 		int error;
 
@@ -407,7 +408,7 @@
 	tv.tv_usec = (data % 1000) * 1000;
 	tticks = tvtohz(&tv);
 
-	return tticks;
+	return (tticks);
 }
 
 /* XXX - move to kern_timeout.c? */
@@ -420,7 +421,7 @@
 	kn->kn_data++;
 	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */
 
-	if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
+	if (!(kn->kn_flags & EV_ONESHOT)) {
 		calloutp = (struct callout *)kn->kn_hook;
 		callout_reset(calloutp, timertoticks(kn->kn_sdata),
 		    filt_timerexpire, kn);
@@ -428,7 +429,7 @@
 }
 
 /*
- * data contains amount of time to sleep, in milliseconds
+ * Data contains the amount of time to sleep, in milliseconds.
  */
 /* XXX - move to kern_timeout.c? */
 static int
@@ -470,7 +471,6 @@
 static int
 filt_timer(struct knote *kn, long hint)
 {
-
 	return (kn->kn_data != 0);
 }
 
@@ -491,7 +491,7 @@
 		goto done2;
 
 	/* An extra reference on `nfp' has been held for us by falloc(). */
-	kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
+	kq = malloc(sizeof(*kq), M_KQUEUE, M_WAITOK | M_ZERO);
 	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
 	TAILQ_INIT(&kq->kq_head);
 	kq->kq_fdp = fdp;
@@ -540,7 +540,7 @@
 
 	if ((error = fget(td, uap->fd, &fp)) != 0)
 		return (error);
-	if ((error = kqueue_aquire(fp, &kq)) != 0)
+	if ((error = kqueue_acquire(fp, &kq)) != 0)
 		goto done_norel;
 
 	if (uap->timeout != NULL) {
@@ -551,11 +551,9 @@
 	}
 
 	nerrors = 0;
-
 	while (uap->nchanges > 0) {
 		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
-		error = copyin(uap->changelist, keva,
-		    n * sizeof *keva);
+		error = copyin(uap->changelist, keva, n * sizeof(*keva));
 		if (error)
 			goto done;
 		for (i = 0; i < n; i++) {
@@ -566,8 +564,7 @@
 				if (uap->nevents != 0) {
 					kevp->flags = EV_ERROR;
 					kevp->data = error;
-					(void) copyout(kevp,
-					    uap->eventlist,
+					(void)copyout(kevp, uap->eventlist,
 					    sizeof(*kevp));
 					uap->eventlist++;
 					uap->nevents--;
@@ -580,14 +577,13 @@
 		uap->nchanges -= n;
 		uap->changelist += n;
 	}
-	if (nerrors) {
+	if (nerrors != 0) {
 		td->td_retval[0] = nerrors;
 		error = 0;
-		goto done;
-	}
-
-	error = kqueue_scan(kq, uap->nevents, uap->eventlist, uap->timeout,
-	    keva, td);
+	} else {
+		error = kqueue_scan(kq, uap->nevents, uap->eventlist,
+		    uap->timeout, keva, td);
+	}
 done:
 	kqueue_release(kq, 0);
 done_norel:
@@ -602,10 +598,9 @@
 	int error;
 
 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
-		printf(
-"trying to add a filterop that is out of range: %d is beyond %d\n",
-		    ~filt, EVFILT_SYSCOUNT);
-		return EINVAL;
+		printf("trying to add a filterop that is out of range: "
+		    "%d is beyond %d\n", ~filt, EVFILT_SYSCOUNT);
+		return (EINVAL);
 	}
 	mtx_lock(&filterops_lock);
 	if (sysfilt_ops[~filt].for_fop != &null_filtops &&
@@ -614,10 +609,11 @@
 	else {
 		sysfilt_ops[~filt].for_fop = filtops;
 		sysfilt_ops[~filt].for_refcnt = 0;
+		error = 0;
 	}
 	mtx_unlock(&filterops_lock);
 
-	return (0);
+	return (error);
 }
 
 int
@@ -625,9 +621,8 @@
 {
 	int error;
 
-	error = 0;
 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
-		return EINVAL;
+		return (EINVAL);
 
 	mtx_lock(&filterops_lock);
 	if (sysfilt_ops[~filt].for_fop == &null_filtops ||
@@ -638,18 +633,18 @@
 	else {
 		sysfilt_ops[~filt].for_fop = &null_filtops;
 		sysfilt_ops[~filt].for_refcnt = 0;
+		error = 0;
 	}
 	mtx_unlock(&filterops_lock);
 
-	return error;
+	return (error);
 }
 
 static struct filterops *
 kqueue_fo_find(int filt)
 {
-
 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
-		return NULL;
+		return (NULL);
 
 	mtx_lock(&filterops_lock);
 	sysfilt_ops[~filt].for_refcnt++;
@@ -657,13 +652,12 @@
 		sysfilt_ops[~filt].for_fop = &null_filtops;
 	mtx_unlock(&filterops_lock);
 
-	return sysfilt_ops[~filt].for_fop;
+	return (sysfilt_ops[~filt].for_fop);
 }
 
 static void
 kqueue_fo_release(int filt)
 {
-
 	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 		return;
 
@@ -675,12 +669,13 @@
 }
 
 /*
- * A ref to kq (obtained via kqueue_aquire) should be held.  waitok will
+ * A ref to kq (obtained via kqueue_acquire) should be held.  waitok will
  * influence if memory allocation should wait.  Make sure it is 0 if you
  * hold any mutexes.
  */
 int
-kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
+kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
+    int waitok)
 {
 	struct filedesc *fdp;
 	struct filterops *fops;
@@ -699,16 +694,15 @@
 	filt = kev->filter;
 	fops = kqueue_fo_find(filt);
 	if (fops == NULL)
-		return EINVAL;
-
-	tkn = knote_alloc(waitok);		/* prevent waiting with locks */
+		return (EINVAL);
 
+	tkn = knote_alloc(waitok);	/* Prevent waiting with locks. */
 findkn:
 	if (fops->f_isfd) {
 		KASSERT(td != NULL, ("td is NULL"));
 		fdp = td->td_proc->p_fd;
 		FILEDESC_LOCK(fdp);
-		/* validate descriptor */
+		/* Validate descriptor. */
 		fd = kev->ident;
 		if (fd < 0 || fd >= fdp->fd_nfiles ||
 		    (fp = fdp->fd_ofiles[fd]) == NULL) {
@@ -718,21 +712,25 @@
 		}
 		fhold(fp);
 
-		if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
-		    kev->ident, 0) != 0) {
-			/* unlock and try again */
-			FILEDESC_UNLOCK(fdp);
-			fdrop(fp, td);
-			fp = NULL;
-			error = kqueue_expand(kq, fops, kev->ident, waitok);
-			if (error)
-				goto done;
-			goto findkn;
+		if (kev->flags & EV_ADD) {
+			error = kqueue_expand(kq, fops, kev->ident, 0);
+			if (error) {
+				/* Unlock and try again */
+				FILEDESC_UNLOCK(fdp);
+				fdrop(fp, td);
+				fp = NULL;
+				if (!waitok)
+					goto done;
+				error = kqueue_expand(kq, fops, kev->ident, 1);
+				if (error)
+					goto done;
+				goto findkn;
+			}
 		}
 
 		if (fp->f_type == DTYPE_KQUEUE) {
 			/*
-			 * if we add some inteligence about what we are doing,
+			 * If we add some intelligence about what we are doing,
 			 * we should be able to support events on ourselves.
 			 * We need to know when we are doing this to prevent
 			 * getting both the knlist lock and the kq lock since
@@ -755,15 +753,15 @@
 		}
 		FILEDESC_UNLOCK(fdp);
 	} else {
-		if ((kev->flags & EV_ADD) == EV_ADD)
-			kqueue_expand(kq, fops, kev->ident, waitok);
+		if (kev->flags & EV_ADD)
+			(void)kqueue_expand(kq, fops, kev->ident, waitok);
 
 		KQ_LOCK(kq);
 		if (kq->kq_knhashmask != 0) {
 			struct klist *list;
 
-			list = &kq->kq_knhash[
-			    KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
+			list = &kq->kq_knhash[KN_HASH((u_long)kev->ident,
+			    kq->kq_knhashmask)];
 			SLIST_FOREACH(kn, list, kn_link)
 				if (kev->ident == kn->kn_id &&
 				    kev->filter == kn->kn_filter)
@@ -771,8 +769,8 @@
 		}
 	}
 
-	/* knote is in the process of changing, wait for it to stablize. */
-	if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
+	/* Knote is in the process of changing, wait for it to stabilize. */
+	if (kn != NULL && kn->kn_status & KN_INFLUX) {
 		if (fp != NULL) {
 			fdrop(fp, td);
 			fp = NULL;
@@ -783,14 +781,14 @@
 		goto findkn;
 	}
 
-	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
+	if (kn == NULL && !(kev->flags & EV_ADD)) {
 		KQ_UNLOCK(kq);
 		error = ENOENT;
 		goto done;
 	}
 
 	/*
-	 * kn now contains the matching knote, or NULL if no match
+	 * kn now contains the matching knote, or NULL if no match.
 	 */
 	if (kev->flags & EV_ADD) {
 		if (kn == NULL) {
@@ -804,8 +802,8 @@
 			kn->kn_kq = kq;
 			kn->kn_fop = fops;
 			/*
-			 * apply reference counts to knote structure, and
-			 * do not release it at the end of this routine.
+			 * Apply reference counts to knote structure, and
+			 * do not release them at the end of this routine.
 			 */
 			fops = NULL;
 			fp = NULL;
@@ -824,7 +822,8 @@
 				goto done;
 			}
 
-			if ((error = kn->kn_fop->f_attach(kn)) != 0) {
+			error = kn->kn_fop->f_attach(kn);
+			if (error) {
 				knote_drop(kn, td);
 				goto done;
 			}
@@ -864,15 +863,14 @@
 		goto done;
 	}
 
-	if ((kev->flags & EV_DISABLE) &&
-	    ((kn->kn_status & KN_DISABLED) == 0)) {
+	if (kev->flags & EV_DISABLE &&
+	    !(kn->kn_status & KN_DISABLED))
 		kn->kn_status |= KN_DISABLED;
-	}
 
-	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
+	if (kev->flags & EV_ENABLE && kn->kn_status & KN_DISABLED) {
 		kn->kn_status &= ~KN_DISABLED;
-		if ((kn->kn_status & KN_ACTIVE) &&
-		    ((kn->kn_status & KN_QUEUED) == 0))
+		if (kn->kn_status & KN_ACTIVE &&
+		    !(kn->kn_status & KN_QUEUED))
 			knote_enqueue(kn);
 	}
 	KQ_UNLOCK_FLUX(kq);
@@ -890,12 +888,10 @@
 }
 
 static int
-kqueue_aquire(struct file *fp, struct kqueue **kqp)
+kqueue_acquire(struct file *fp, struct kqueue **kqp)
 {
-	int error;
 	struct kqueue *kq;
-
-	error = 0;
+	int error;
 
 	FILE_LOCK(fp);
 	do {
@@ -906,17 +902,18 @@
 		}
 		*kqp = kq;
 		KQ_LOCK(kq);
-		if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
+		if (kq->kq_state & KQ_CLOSING) {
 			KQ_UNLOCK(kq);
 			error = EBADF;
 			break;
 		}
 		kq->kq_refcnt++;
 		KQ_UNLOCK(kq);
+		error = 0;
 	} while (0);
 	FILE_UNLOCK(fp);
 
-	return error;
+	return (error);
 }
 
 static void
@@ -936,12 +933,11 @@
 static void
 kqueue_schedtask(struct kqueue *kq)
 {
-
 	KQ_OWNED(kq);
-	KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
+	KASSERT(!(kq->kq_state & KQ_TASKDRAIN),
 	    ("scheduling kqueue task while draining"));
 
-	if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
+	if (!(kq->kq_state & KQ_TASKSCHED)) {
 		taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
 		kq->kq_state |= KQ_TASKSCHED;
 	}
@@ -958,13 +954,13 @@
  */
 static int
 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
-	int waitok)
+    int waitok)
 {
 	struct klist *list, *tmp_knhash;
 	u_long tmp_knhashmask;
+	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 	int size;
 	int fd;
-	int mflag = waitok ? M_WAITOK : M_NOWAIT;
 
 	KQ_NOTOWNED(kq);
 
@@ -975,9 +971,9 @@
 			while (size <= fd)
 				size += KQEXTENT;
 			MALLOC(list, struct klist *,
-			    size * sizeof list, M_KQUEUE, mflag);
+			    size * sizeof(list), M_KQUEUE, mflag);
 			if (list == NULL)
-				return ENOMEM;
+				return (ENOMEM);
 			KQ_LOCK(kq);
 			if (kq->kq_knlistsize > fd) {
 				FREE(list, M_KQUEUE);
@@ -985,13 +981,13 @@
 			} else {
 				if (kq->kq_knlist != NULL) {
 					bcopy(kq->kq_knlist, list,
-					    kq->kq_knlistsize * sizeof list);
+					    kq->kq_knlistsize * sizeof(list));
 					FREE(kq->kq_knlist, M_KQUEUE);
 					kq->kq_knlist = NULL;
 				}
 				bzero((caddr_t)list +
-				    kq->kq_knlistsize * sizeof list,
-				    (size - kq->kq_knlistsize) * sizeof list);
+				    kq->kq_knlistsize * sizeof(list),
+				    (size - kq->kq_knlistsize) * sizeof(list));
 				kq->kq_knlistsize = size;
 				kq->kq_knlist = list;
 			}
@@ -1002,7 +998,7 @@
 			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 			    &tmp_knhashmask);
 			if (tmp_knhash == NULL)
-				return ENOMEM;
+				return (ENOMEM);
 			KQ_LOCK(kq);
 			if (kq->kq_knhashmask == 0) {
 				kq->kq_knhash = tmp_knhash;
@@ -1015,16 +1011,15 @@
 	}
 
 	KQ_NOTOWNED(kq);
-	return 0;
+	return (0);
 }
 
 static void
 kqueue_task(void *arg, int pending)
 {
 	struct kqueue *kq;
-	int haskqglobal;
+	int haskqglobal = 0;
 
-	haskqglobal = 0;
 	kq = arg;
 
 	KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
@@ -1033,9 +1028,8 @@
 	KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 
 	kq->kq_state &= ~KQ_TASKSCHED;
-	if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
+	if (kq->kq_state & KQ_TASKDRAIN)
 		wakeup(&kq->kq_state);
-	}
 	KQ_UNLOCK(kq);
 	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 }
@@ -1046,11 +1040,11 @@
  */
 static int
 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
-	const struct timespec *tsp, struct kevent *keva, struct thread *td)
+    const struct timespec *tsp, struct kevent *keva, struct thread *td)
 {
-	struct kevent *kevp;
 	struct timeval atv, rtv, ttv;
 	struct knote *kn, marker;
+	struct kevent *kevp;
 	int count, timeout, nkev, error;
 	int haskqglobal;
 
@@ -1092,7 +1086,7 @@
 		ttv = atv;
 		timevalsub(&ttv, &rtv);
 		timeout = ttv.tv_sec > 24 * 60 * 60 ?
-			24 * 60 * 60 * hz : tvtohz(&ttv);
+		    24 * 60 * 60 * hz : tvtohz(&ttv);
 	}
 
 start:
@@ -1121,7 +1115,7 @@
 		kn = TAILQ_FIRST(&kq->kq_head);
 
 		if ((kn->kn_status == KN_MARKER && kn != &marker) ||
-		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
+		    kn->kn_status & KN_INFLUX) {
 			kq->kq_state |= KQ_FLUXWAIT;
 			error = msleep(kq, &kq->kq_lock, PSOCK,
 			    "kqflxwt", 0);
@@ -1129,7 +1123,7 @@
 		}
 
 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
-		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
+		if (kn->kn_status & KN_DISABLED) {
 			kn->kn_status &= ~KN_QUEUED;
 			kq->kq_count--;
 			continue;
@@ -1140,17 +1134,17 @@
 				goto retry;
 			goto done;
 		}
-		KASSERT((kn->kn_status & KN_INFLUX) == 0,
+		KASSERT(!(kn->kn_status & KN_INFLUX),
 		    ("KN_INFLUX set when not suppose to be"));
 
-		if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
+		if (kn->kn_flags & EV_ONESHOT) {
 			kn->kn_status &= ~KN_QUEUED;
 			kn->kn_status |= KN_INFLUX;
 			kq->kq_count--;
 			KQ_UNLOCK(kq);
 			/*
 			 * We don't need to lock the list since we've marked
-			 * it _INFLUX.
+			 * it as in-flux.
 			 */
 			*kevp = kn->kn_kevent;
 			kn->kn_fop->f_detach(kn);
@@ -1160,7 +1154,7 @@
 		} else {
 			kn->kn_status |= KN_INFLUX;
 			KQ_UNLOCK(kq);
-			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
+			if (kn->kn_status & KN_KQUEUE)
 				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 			KN_LIST_LOCK(kn);
 			if (kn->kn_fop->f_event(kn, 0) == 0) {
@@ -1191,7 +1185,7 @@
 
 		if (nkev == KQ_NEVENTS) {
 			KQ_UNLOCK_FLUX(kq);
-			error = copyout(keva, ulistp, sizeof *keva * nkev);
+			error = copyout(keva, ulistp, sizeof(*keva) * nkev);
 			ulistp += nkev;
 			nkev = 0;
 			kevp = keva;
@@ -1208,7 +1202,7 @@
 done_nl:
 	KQ_NOTOWNED(kq);
 	if (nkev != 0)
-		error = copyout(keva, ulistp, sizeof *keva * nkev);
+		error = copyout(keva, ulistp, sizeof(*keva) * nkev);
 	td->td_retval[0] = maxevents - count;
 	return (error);
 }
@@ -1236,7 +1230,7 @@
 /*ARGSUSED*/
 static int
 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
-	struct ucred *active_cred, struct thread *td)
+    struct ucred *active_cred, struct thread *td)
 {
 	/*
 	 * Enabling sigio causes two major problems:
@@ -1284,14 +1278,15 @@
 /*ARGSUSED*/
 static int
 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
-	struct thread *td)
+    struct thread *td)
 {
 	struct kqueue *kq;
 	int revents = 0;
 	int error;
 
-	if ((error = kqueue_aquire(fp, &kq)))
-		return POLLERR;
+	error = kqueue_acquire(fp, &kq);
+	if (error)
+		return (POLLERR);
 
 	KQ_LOCK(kq);
 	if (events & (POLLIN | POLLRDNORM)) {
@@ -1310,9 +1305,8 @@
 /*ARGSUSED*/
 static int
 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
-	struct thread *td)
+    struct thread *td)
 {
-
 	return (ENXIO);
 }
 
@@ -1328,13 +1322,13 @@
 
 	GIANT_REQUIRED;
 
-	if ((error = kqueue_aquire(fp, &kq)))
-		return error;
+	error = kqueue_acquire(fp, &kq);
+	if (error)
+		return (error);
 
 	KQ_LOCK(kq);
 
-	KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
-	    ("kqueue already closing"));
+	KASSERT(!(kq->kq_state & KQ_CLOSING), ("kqueue already closing"));
 	kq->kq_state |= KQ_CLOSING;
 	if (kq->kq_refcnt > 1)
 		msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
@@ -1346,7 +1340,8 @@
 	    ("kqueue's knlist not empty"));
 
 	for (i = 0; i < kq->kq_knlistsize; i++) {
-		while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
+		while (!SLIST_EMPTY(&kq->kq_knlist[i])) {
+			kn = SLIST_FIRST(&kq->kq_knlist[i]);
 			KASSERT((kn->kn_status & KN_INFLUX) == 0,
 			    ("KN_INFLUX set when not suppose to be"));
 			kn->kn_status |= KN_INFLUX;
@@ -1358,7 +1353,8 @@
 	}
 	if (kq->kq_knhashmask != 0) {
 		for (i = 0; i <= kq->kq_knhashmask; i++) {
-			while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
+			while (!SLIST_EMPTY(&kq->kq_knhash[i])) {
+				kn = SLIST_FIRST(&kq->kq_knhash[i]);
 				KASSERT((kn->kn_status & KN_INFLUX) == 0,
 				    ("KN_INFLUX set when not suppose to be"));
 				kn->kn_status |= KN_INFLUX;
@@ -1370,12 +1366,12 @@
 		}
 	}
 
-	if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
+	if (kq->kq_state & KQ_TASKSCHED) {
 		kq->kq_state |= KQ_TASKDRAIN;
 		msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 	}
 
-	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
+	if (kq->kq_state & KQ_SEL) {
 		kq->kq_state &= ~KQ_SEL;
 		selwakeuppri(&kq->kq_sel, PSOCK);
 	}
@@ -1407,19 +1403,18 @@
 {
 	KQ_OWNED(kq);
 
-	if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
+	if (kq->kq_state & KQ_SLEEP) {
 		kq->kq_state &= ~KQ_SLEEP;
 		wakeup(kq);
 	}
-	if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
+	if (kq->kq_state & KQ_SEL) {
 		kq->kq_state &= ~KQ_SEL;
 		selwakeuppri(&kq->kq_sel, PSOCK);
 	}
 	if (!knlist_empty(&kq->kq_sel.si_note))
 		kqueue_schedtask(kq);
-	if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
+	if (kq->kq_state & KQ_ASYNC)
 		pgsigio(&kq->kq_sigio, SIGIO, 0);
-	}
 }
 
 /*
@@ -1452,9 +1447,9 @@
 	 */
 	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
 		kq = kn->kn_kq;
-		if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
+		if (!(kn->kn_status & KN_INFLUX)) {
 			KQ_LOCK(kq);
-			if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
+			if (!(kn->kn_status & KN_INFLUX)) {
 				kn->kn_status |= KN_HASKQLOCK;
 				if (kn->kn_fop->f_event(kn, hint))
 					KNOTE_ACTIVATE(kn, 1);
@@ -1476,8 +1471,8 @@
 {
 	mtx_assert(knl->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
 	KQ_NOTOWNED(kn->kn_kq);
-	KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
-	    (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
+	KASSERT((kn->kn_status & (KN_INFLUX | KN_DETACHED)) ==
+	    (KN_INFLUX | KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 	if (!islocked)
 		mtx_lock(knl->kl_lock);
 	SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
@@ -1490,14 +1485,16 @@
 }
 
 static void
-knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
+knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
+    int kqislocked)
 {
-	KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
+	KASSERT(!(kqislocked && !knlislocked), ("kq locked w/o knl locked"));
 	mtx_assert(knl->kl_lock, knlislocked ? MA_OWNED : MA_NOTOWNED);
 	mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 	if (!kqislocked)
-		KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
-    ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
+		KASSERT((kn->kn_status & (KN_INFLUX | KN_DETACHED)) ==
+		    KN_INFLUX, ("knlist_remove called w/o knote being "
+		    "KN_INFLUX or already removed"));
 	if (!knlislocked)
 		mtx_lock(knl->kl_lock);
 	SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
@@ -1517,7 +1514,6 @@
 void
 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 {
-
 	knlist_remove_kq(knl, kn, islocked, 0);
 }
 
@@ -1527,27 +1523,23 @@
 void
 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
 {
-
-	knlist_remove_kq(knl, kn, 1,
-	    (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
+	knlist_remove_kq(knl, kn, 1, (kn->kn_status & KN_HASKQLOCK) != 0);
 }
 
 int
 knlist_empty(struct knlist *knl)
 {
-
 	mtx_assert(knl->kl_lock, MA_OWNED);
-	return SLIST_EMPTY(&knl->kl_list);
+	return (SLIST_EMPTY(&knl->kl_list));
 }
 
-static struct mtx	knlist_lock;
+static struct mtx knlist_lock;
 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
-	MTX_DEF);
+    MTX_DEF);
 
 void
 knlist_init(struct knlist *knl, struct mtx *mtx)
 {
-
 	if (mtx == NULL)
 		knl->kl_lock = &knlist_lock;
 	else
@@ -1583,19 +1575,19 @@
 	struct knote *kn;
 	struct kqueue *kq;
 
-	if (islocked)
+	if (islocked) {
 		mtx_assert(knl->kl_lock, MA_OWNED);
-	else {
+	} else {
 		mtx_assert(knl->kl_lock, MA_NOTOWNED);
-again:		/* need to reaquire lock since we have dropped it */
+again:		/* need to reacquire lock since we have dropped it */
 		mtx_lock(knl->kl_lock);
 	}
 
 	SLIST_FOREACH(kn, &knl->kl_list, kn_selnext) {
 		kq = kn->kn_kq;
 		KQ_LOCK(kq);
-		if ((kn->kn_status & KN_INFLUX) &&
-		    (kn->kn_status & KN_DETACHED) != KN_DETACHED) {
+		if (kn->kn_status & KN_INFLUX &&
+		    !(kn->kn_status & KN_DETACHED)) {
 			KQ_UNLOCK(kq);
 			continue;
 		}
@@ -1622,9 +1614,9 @@
 
 	SLIST_INIT(&knl->kl_list);
 
-	if (islocked)
+	if (islocked) {
 		mtx_assert(knl->kl_lock, MA_OWNED);
-	else {
+	} else {
 		mtx_unlock(knl->kl_lock);
 		mtx_assert(knl->kl_lock, MA_NOTOWNED);
 	}
@@ -1655,7 +1647,8 @@
 again:
 		influx = 0;
 		while (kq->kq_knlistsize > fd &&
-		    (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
+		    !SLIST_EMPTY(&kq->kq_knlist[fd])) {
+			kn = SLIST_FIRST(&kq->kq_knlist[fd]);
 			if (kn->kn_status & KN_INFLUX) {
 				/* someone else might be waiting on our knote */
 				if (influx)
@@ -1685,17 +1678,17 @@
 
 	if (kn->kn_fop->f_isfd) {
 		if (kn->kn_id >= kq->kq_knlistsize)
-			return ENOMEM;
+			return (ENOMEM);
 		list = &kq->kq_knlist[kn->kn_id];
 	} else {
 		if (kq->kq_knhash == NULL)
-			return ENOMEM;
+			return (ENOMEM);
 		list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 	}
 
 	SLIST_INSERT_HEAD(list, kn, kn_link);
 
-	return 0;
+	return (0);
 }
 
 /*
@@ -1712,7 +1705,7 @@
 	kq = kn->kn_kq;
 
 	KQ_NOTOWNED(kq);
-	KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
+	KASSERT(kn->kn_status & KN_INFLUX,
 	    ("knote_drop called without KN_INFLUX set in kn_status"));
 
 	KQ_LOCK(kq);
@@ -1741,7 +1734,7 @@
 	struct kqueue *kq = kn->kn_kq;
 
 	KQ_OWNED(kn->kn_kq);
-	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
+	KASSERT(!(kn->kn_status & KN_QUEUED), ("knote already queued"));
 
 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 	kn->kn_status |= KN_QUEUED;
@@ -1765,7 +1758,6 @@
 static void
 knote_init(void)
 {
-
 	knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, 0);
 }
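
For completeness, the userland-visible interface is unchanged by all of
the above; knotes, KN_INFLUX, and the kq lock are invisible to a
kevent(2) consumer.  A minimal example (mine, not part of the patch)
that registers a read filter on stdin and waits for it to fire:

	#include <sys/types.h>
	#include <sys/event.h>
	#include <sys/time.h>

	#include <err.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct kevent change, event;
		int kq, n;

		if ((kq = kqueue()) == -1)
			err(1, "kqueue");

		/* EV_ADD allocates a knote; EVFILT_READ watches stdin. */
		EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
		if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)
			err(1, "kevent (register)");

		/* Block until the knote is activated and dequeued. */
		n = kevent(kq, NULL, 0, &event, 1, NULL);
		if (n == -1)
			err(1, "kevent (wait)");
		if (n > 0)
			printf("fd %d readable, %d bytes pending\n",
			    (int)event.ident, (int)event.data);

		close(kq);	/* runs the kqueue_close() path above */
		return (0);
	}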

-- 
Brian Fundakowski Feldman                           \'[ FreeBSD ]''''''''''\
  <> green@FreeBSD.org                               \  The Power to Serve! \
 Opinions expressed are my own.                       \,,,,,,,,,,,,,,,,,,,,,,\