Date:      Sun, 12 Dec 2004 14:50:35 GMT
From:      David Xu <davidxu@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 66889 for review
Message-ID:  <200412121450.iBCEoZAA050020@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=66889

Change 66889 by davidxu@davidxu_tiger on 2004/12/12 14:49:47

	Make umtx sharable between processes.  Based on this change,
	it is possible to implement PTHREAD_PROCESS_SHARED
	pthread_mutex, pthread_cond, etc. in the thread library; POSIX
	requires PTHREAD_PROCESS_SHARED sync objects to be placed in a
	shared memory page if the user wants them to be shared between
	processes.
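
	A minimal userland sketch of what this enables, assuming a
	thread library that implements PTHREAD_PROCESS_SHARED mutexes
	on top of shared umtx (the userland side is not part of this
	change, and the shm name "/umtx_demo" is illustrative only):

	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <err.h>
	#include <fcntl.h>
	#include <pthread.h>
	#include <unistd.h>

	int
	main(void)
	{
		pthread_mutexattr_t attr;
		pthread_mutex_t *mp;
		int fd;

		/* One page of shared memory that both processes map. */
		fd = shm_open("/umtx_demo", O_CREAT | O_RDWR, 0600);
		if (fd == -1)
			err(1, "shm_open");
		if (ftruncate(fd, getpagesize()) == -1)
			err(1, "ftruncate");
		mp = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
		if (mp == MAP_FAILED)
			err(1, "mmap");

		/*
		 * POSIX requires a PTHREAD_PROCESS_SHARED mutex to live
		 * in memory mapped by every process that uses it.
		 */
		pthread_mutexattr_init(&attr);
		pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
		pthread_mutex_init(mp, &attr);

		if (fork() == 0) {
			/* Child: same page, hence same umtx. */
			pthread_mutex_lock(mp);
			/* ... critical section shared with parent ... */
			pthread_mutex_unlock(mp);
			_exit(0);
		}
		pthread_mutex_lock(mp);
		/* ... */
		pthread_mutex_unlock(mp);
		wait(NULL);
		shm_unlink("/umtx_demo");
		return (0);
	}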

Affected files ...

.. //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#4 edit

Differences ...

==== //depot/projects/davidxu_thread/src/sys/kern/kern_umtx.c#4 (text+ko) ====

@@ -37,21 +37,31 @@
 #include <sys/sysent.h>
 #include <sys/systm.h>
 #include <sys/sysproto.h>
+#include <sys/eventhandler.h>
 #include <sys/thr.h>
 #include <sys/umtx.h>
 
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+
+struct umtx_key {
+	vm_object_t	uk_object;
+	vm_ooffset_t	uk_offset;
+};
+
 struct umtx_q {
 	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
-	TAILQ_HEAD(, thread)	uq_tdq;		/* List of threads blocked here. */
-	struct umtx		*uq_umtx;	/* Pointer key component. */
-	pid_t			uq_pid;		/* Pid key component. */
-	int			uq_count;	/* How many threads blocked. */
+	struct umtx_key		uq_key;
+	struct thread		*uq_thread;
 };
 
 LIST_HEAD(umtx_head, umtx_q);
 struct umtxq_chain {
-	struct mtx		uc_lock;	/* lock for this chain. */
-	struct umtx_head	uc_queues;	/* List of sleep queues. */
+	struct mtx		uc_lock;	/* Lock for this chain. */
+	struct umtx_head	uc_queue;	/* List of blocked threads. */
 };
 
 #define	GOLDEN_RATIO_PRIME	2654404609U
@@ -64,17 +74,17 @@
 #define	UMTX_CONTESTED	LONG_MIN
 
 static void umtx_init_chains(void *);
-static int umtxq_hash(struct thread *, struct umtx *);
-static void umtxq_lock(struct thread *td, struct umtx *key);
-static void umtxq_unlock(struct thread *td, struct umtx *key);
-static struct umtx_q *umtxq_lookup(struct thread *, struct umtx *);
-static struct umtx_q *umtxq_insert(struct thread *, struct umtx *);
-static int umtxq_count(struct thread *td, struct umtx *umtx);
-static int umtx_sleep(struct thread *td, struct umtx *umtx, int priority,
-	   const char *wmesg, int timo);
-static void umtx_signal(struct thread *td, struct umtx *umtx);
+static int  umtxq_hash(struct umtx_key *);
+static struct mtx *umtxq_mtx(int chain);
+static void umtxq_lock(int chain);
+static void umtxq_unlock(int chain);
+static void umtxq_insert(int chain, struct umtx_q *);
+static int  umtxq_count(struct umtx_key *);
+static void umtxq_signal(struct umtx_key *);
+static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
+		int flags);
 
-SYSINIT(umtx, SI_SUB_LOCK, SI_ORDER_MIDDLE, umtx_init_chains, NULL);
+SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtx_init_chains, NULL);
 
 static void
 umtx_init_chains(void *arg __unused)
@@ -84,154 +94,119 @@
 	for (i = 0; i < UMTX_CHAINS; ++i) {
 		mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
 			 MTX_DEF | MTX_DUPOK);
-		LIST_INIT(&umtxq_chains[i].uc_queues);
+		LIST_INIT(&umtxq_chains[i].uc_queue);
+	}
+	EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
+}
+
+static void
+fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
+{
+	struct thread *td;
+
+	PROC_LOCK(p1);
+	FOREACH_THREAD_IN_PROC(p1, td) {
+		if (td->td_flags & TDF_UMTXQ)
+			wakeup(td);
 	}
+	PROC_UNLOCK(p1);
 }
 
 static inline int
-umtxq_hash(struct thread *td, struct umtx *umtx)
+umtxq_hash(struct umtx_key *key)
 {
-	unsigned n = (uintptr_t)umtx + td->td_proc->p_pid;
+	unsigned n = (uintptr_t)key->uk_object + key->uk_offset;
 	return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
 }
 
-static inline void
-umtxq_lock(struct thread *td, struct umtx *key)
+static inline struct mtx *
+umtxq_mtx(int chain)
 {
-	int chain = umtxq_hash(td, key);
-	mtx_lock(&umtxq_chains[chain].uc_lock);
+	return (&umtxq_chains[chain].uc_lock);
 }
 
 static inline void
-umtxq_unlock(struct thread *td, struct umtx *key)
+umtxq_lock(int chain)
 {
-	int chain = umtxq_hash(td, key);
-	mtx_unlock(&umtxq_chains[chain].uc_lock);
+	mtx_lock(umtxq_mtx(chain));
 }
 
-static struct umtx_q *
-umtxq_lookup(struct thread *td, struct umtx *umtx)
+static inline void
+umtxq_unlock(int chain)
 {
-	struct umtx_head *head;
-	struct umtx_q *uq;
-	pid_t pid;
-	int chain;
-
-	chain = umtxq_hash(td, umtx);
-	mtx_assert(&umtxq_chains[chain].uc_lock, MA_OWNED);
-	pid = td->td_proc->p_pid;
-	head = &umtxq_chains[chain].uc_queues;
-	LIST_FOREACH(uq, head, uq_next) {
-		if (uq->uq_pid == pid && uq->uq_umtx == umtx)
-			return (uq);
-	}
-	return (NULL);
+	mtx_unlock(umtxq_mtx(chain));
 }
 
 /*
  * Insert a thread onto the umtx queue.
  */
-static struct umtx_q *
-umtxq_insert(struct thread *td, struct umtx *umtx)
+static inline void
+umtxq_insert(int chain, struct umtx_q *uq)
 {
 	struct umtx_head *head;
-	struct umtx_q *uq, *ins = NULL;
-	pid_t pid;
-	int chain;
 
-	chain = umtxq_hash(td, umtx);
-	pid = td->td_proc->p_pid;
-	if ((uq = umtxq_lookup(td, umtx)) == NULL) {
-		umtxq_unlock(td, umtx);
-		ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
-		umtxq_lock(td, umtx);
-
-		/*
-		 * Some one else could have succeeded while we were blocked
-		 * waiting on memory.
-		 */
-		if ((uq = umtxq_lookup(td, umtx)) == NULL) {
-			head = &umtxq_chains[chain].uc_queues;
-			uq = ins;
-			uq->uq_pid = pid;
-			uq->uq_umtx = umtx;
-			uq->uq_count = 0;
-			LIST_INSERT_HEAD(head, uq, uq_next);
-			TAILQ_INIT(&uq->uq_tdq);
-			ins = NULL;
-		}
-	}
-	TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);
-	uq->uq_count++;
-	if (ins) {
-		umtxq_unlock(td, umtx);
-		free(ins, M_UMTX);
-		umtxq_lock(td, umtx);
-	}
-	return (uq);
+	head = &umtxq_chains[chain].uc_queue;
+	LIST_INSERT_HEAD(head, uq, uq_next);
+	mtx_lock_spin(&sched_lock);
+	uq->uq_thread->td_flags |= TDF_UMTXQ;
+	mtx_unlock_spin(&sched_lock);
 }
 
 /*
- * Remove thread from umtx queue, umtx chain lock is also
- * released.
+ * Remove thread from umtx queue.
  */
-static void
-umtx_remove(struct umtx_q *uq, struct thread *td, struct umtx *umtx)
+static inline void
+umtxq_remove(struct umtx_q *uq)
 {
-	int chain;
-
-	chain = umtxq_hash(td, umtx);
-	mtx_assert(&umtxq_chains[chain].uc_lock, MA_OWNED);
-	TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
-	uq->uq_count--;
-	if (TAILQ_EMPTY(&uq->uq_tdq)) {
+	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
 		LIST_REMOVE(uq, uq_next);
-		umtxq_unlock(td, umtx);
-		free(uq, M_UMTX);
-	} else
-		umtxq_unlock(td, umtx);
+		mtx_lock_spin(&sched_lock);
+		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
+		mtx_unlock_spin(&sched_lock);
+	}
 }
 
-static inline int
-umtxq_count(struct thread *td, struct umtx *umtx)
+static int
+umtxq_count(struct umtx_key *key)
 {
 	struct umtx_q *uq;
-	int count = 0;
+	struct umtx_head *head;
+	int chain, count = 0;
 
-	umtxq_lock(td, umtx);
-	if ((uq = umtxq_lookup(td, umtx)) != NULL)
-		count = uq->uq_count;
-	umtxq_unlock(td, umtx);
+	chain = umtxq_hash(key);
+	umtxq_lock(chain);
+	head = &umtxq_chains[chain].uc_queue;
+	LIST_FOREACH(uq, head, uq_next) {
+		if (uq->uq_key.uk_object == key->uk_object &&
+		    uq->uq_key.uk_offset == key->uk_offset) {
+			if (++count > 1)
+				break;
+		}
+	}
+	umtxq_unlock(chain);
 	return (count);
 }
 
-static inline int
-umtx_sleep(struct thread *td, struct umtx *umtx, int priority,
-	   const char *wmesg, int timo)
-{
-	int chain;
-
-	chain = umtxq_hash(td, umtx);
-	mtx_assert(&umtxq_chains[chain].uc_lock, MA_OWNED);
-	return (msleep(td, &umtxq_chains[chain].uc_lock, priority,
-		       wmesg, timo));	
-}
-
 static void
-umtx_signal(struct thread *td, struct umtx *umtx)
+umtxq_signal(struct umtx_key *key)
 {
 	struct umtx_q *uq;
+	struct umtx_head *head;
 	struct thread *blocked = NULL;
+	int chain;
 
-	umtxq_lock(td, umtx);
-	if ((uq = umtxq_lookup(td, umtx)) != NULL) {
-		if ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL) {
-			mtx_lock_spin(&sched_lock);
-			blocked->td_flags |= TDF_UMTXWAKEUP;
-			mtx_unlock_spin(&sched_lock);
+	chain = umtxq_hash(key);
+	umtxq_lock(chain);
+	head = &umtxq_chains[chain].uc_queue;
+	LIST_FOREACH(uq, head, uq_next) {
+		if (uq->uq_key.uk_object == key->uk_object &&
+		    uq->uq_key.uk_offset == key->uk_offset) {
+			blocked = uq->uq_thread;
+			umtxq_remove(uq);
+			break;
 		}
 	}
-	umtxq_unlock(td, umtx);
+	umtxq_unlock(chain);
 	if (blocked != NULL)
 		wakeup(blocked);
 }
@@ -240,19 +215,29 @@
 _umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
     /* struct umtx *umtx */
 {
-	struct umtx_q *uq;
+	struct umtx_q uq;
 	struct umtx *umtx;
+	vm_map_t map;
+	vm_map_entry_t entry;
+	vm_object_t object;
+	vm_pindex_t pindex;
+	vm_prot_t prot;
+	boolean_t wired;
 	intptr_t owner;
 	intptr_t old;
+	int chain, page_off;
 	int error = 0;
 
-	uq = NULL;
-
 	/*
 	 * Care must be exercised when dealing with this structure.  It
 	 * can fault on any access.
 	 */
-	umtx = uap->umtx;	
+	umtx = uap->umtx;
+
+	page_off = ((unsigned long)umtx) % PAGE_SIZE;
+	/* The umtx must not cross a page boundary. */
+	if (page_off + sizeof(void *) > PAGE_SIZE)
+		return (EINVAL);
 
 	for (;;) {
 		/*
@@ -292,9 +277,22 @@
 		if (error)
 			return (error);
 
-		umtxq_lock(td, umtx);
-		uq = umtxq_insert(td, umtx);
-		umtxq_unlock(td, umtx);
+		map = &td->td_proc->p_vmspace->vm_map;
+		if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
+		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
+			vm_map_lookup_done(map, entry);
+			return EFAULT;
+		}
+		vm_object_reference(object);
+		uq.uq_key.uk_object = object;
+		uq.uq_key.uk_offset = entry->offset + entry->start -
+			(vm_offset_t)umtx;
+		uq.uq_thread = td;
+		chain = umtxq_hash(&uq.uq_key);
+		umtxq_lock(chain);
+		umtxq_insert(chain, &uq);
+		umtxq_unlock(chain);
+		vm_map_lookup_done(map, entry);
 
 		/*
 		 * Set the contested bit so that a release in user space
@@ -307,9 +305,10 @@
 
 		/* The address was invalid. */
 		if (old == -1) {
-			umtxq_lock(td, umtx);
-			umtx_remove(uq, td, umtx);
-			/* unlocked by umtx_remove */
+			umtxq_lock(chain);
+			umtxq_remove(&uq);
+			umtxq_unlock(chain);
+			vm_object_deallocate(uq.uq_key.uk_object);
 			return (EFAULT);
 		}
 
@@ -318,31 +317,22 @@
 		 * and we need to retry or we lost a race to the thread
 		 * unlocking the umtx.
 		 */
-		umtxq_lock(td, umtx);
-		if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
-			error = umtx_sleep(td, umtx, td->td_priority | PCATCH,
-				    "umtx", 0);
-		else
+		umtxq_lock(chain);
+		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
+			error = msleep(td, umtxq_mtx(chain),
+				       td->td_priority | PCATCH | PDROP,
+				       "umtx", 0);
+			if (td->td_flags & TDF_UMTXQ) {
+				umtxq_lock(chain);
+				umtxq_remove(&uq);
+				umtxq_unlock(chain);
+			}
+		} else {
 			error = 0;
-		umtx_remove(uq, td, umtx);
-		/* unlocked by umtx_remove */
-
-		if (td->td_flags & TDF_UMTXWAKEUP) {
-			/*
-			 * If we were resumed by umtxq_unlock, we should retry
-			 * to avoid a race.
-			 */
-			mtx_lock_spin(&sched_lock);
-			td->td_flags &= ~TDF_UMTXWAKEUP;
-			mtx_unlock_spin(&sched_lock);
-			continue;
+			umtxq_remove(&uq);
+			umtxq_unlock(chain);
 		}
-
-		/*
-		 * If we caught a signal, exit immediately.
-		 */
-		if (error)
-			return (error);
+		vm_object_deallocate(uq.uq_key.uk_object);
 	}
 
 	return (0);
@@ -352,7 +342,14 @@
 _umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
     /* struct umtx *umtx */
 {
+	struct umtx_q uq;
 	struct umtx *umtx;
+	vm_map_t map;
+	vm_map_entry_t entry;
+	vm_object_t object;
+	vm_pindex_t pindex;
+	vm_prot_t prot;
+	boolean_t wired;
 	intptr_t owner;
 	intptr_t old;
 	int count;
@@ -368,7 +365,7 @@
 	if ((owner = fuword(&umtx->u_owner)) == -1)
 		return (EFAULT);
 
-	if ((owner & ~UMTX_CONTESTED) != td->td_tid)
+	if ((owner & ~UMTX_CONTESTED) != td->td_tid) 
 		return (EPERM);
 
 	/* We should only ever be in here for contested locks */
@@ -386,15 +383,28 @@
 	if (old != owner)
 		return (EINVAL);
 
+	map = &td->td_proc->p_vmspace->vm_map;
+	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
+	    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
+		vm_map_lookup_done(map, entry);
+		return EFAULT;
+	}
+	vm_object_reference(object);
+	uq.uq_key.uk_object = object;
+	uq.uq_key.uk_offset = entry->offset + entry->start - (vm_offset_t)umtx;
+	vm_map_lookup_done(map, entry);
+
 	/*
 	 * At this point, a new thread can lock the umtx before we
 	 * reach here, so the contested bit will not be set; if there
 	 * are two or more threads on the wait queue, we should set
 	 * the contested bit for them.
 	 */
-	count = umtxq_count(td, umtx);
-	if (count <= 0)
+	count = umtxq_count(&uq.uq_key);
+	if (count <= 0) {
+		vm_object_deallocate(object);
 		return (0);
+	}
 
 	/*
 	 * If there is a second thread waiting on the umtx, set the contested bit,
@@ -408,8 +418,10 @@
 				    owner | UMTX_CONTESTED);
 			if (old == owner)
 				break;
-			if (old == -1)
+			if (old == -1) {
+				vm_object_deallocate(object);
 				return (EFAULT);
+			}
 			owner = old;
 		}
 		/*
@@ -417,12 +429,15 @@
 		 * to wake more threads, that thread will do it when it unlocks
 		 * the umtx.
 		 */
-		if ((owner & ~UMTX_CONTESTED) != 0)
+		if ((owner & ~UMTX_CONTESTED) != 0) {
+			vm_object_deallocate(object);
 			return (0);
+		}
 	}
 
 	/* Wake blocked thread. */
-	umtx_signal(td, umtx);
+	umtxq_signal(&uq.uq_key);
+	vm_object_deallocate(object);
 
 	return (0);
 }
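
The essence of the change is the new key: a blocked thread is now looked
up by (vm_object, offset within object) instead of (pid, virtual
address), so the lookup no longer depends on which process, or at which
address, the umtx page is mapped.  Below is a minimal userland model of
that invariant; the types are simplified stand-ins rather than kernel
code, and the offset arithmetic is the conventional object-offset form
(the kernel derives uk_offset from entry->offset, entry->start and the
umtx address via vm_map_lookup()):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct mapping {
		const void *obj;        /* stands in for vm_object_t */
		size_t      obj_offset; /* object offset of map start */
		uintptr_t   start;      /* virtual address of map start */
	};

	struct shared_key {
		const void *object;
		size_t      offset;
	};

	/*
	 * Identify a umtx by its backing object and its offset within
	 * that object, independent of the mapping address.
	 */
	static struct shared_key
	shared_key_for(const struct mapping *m, uintptr_t umtx_addr)
	{
		struct shared_key k;

		k.object = m->obj;
		k.offset = m->obj_offset + (size_t)(umtx_addr - m->start);
		return (k);
	}

	int
	main(void)
	{
		static char obj[4096];  /* one shared object, mapped twice */
		struct mapping a = { obj, 0, 0x10000000 }; /* parent's map */
		struct mapping b = { obj, 0, 0x7f000000 }; /* child's map */
		struct shared_key ka, kb;

		/* The same umtx word sits at object offset 128 in both. */
		ka = shared_key_for(&a, a.start + 128);
		kb = shared_key_for(&b, b.start + 128);
		assert(ka.object == kb.object && ka.offset == kb.offset);
		return (0);
	}

Because both mappings resolve to the same (object, offset) pair,
umtxq_hash() selects the same chain in every process, which is what lets
_umtx_unlock() in one process find and wake a waiter queued by another.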


