From owner-p4-projects  Fri May 10 8:32:17 2002
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id 56B5437B405; Fri, 10 May 2002 08:30:51 -0700 (PDT)
Delivered-To: perforce@freebsd.org
Received: from freefall.freebsd.org (freefall.FreeBSD.org [216.136.204.21])
	by hub.freebsd.org (Postfix) with ESMTP id E790E37B403
	for ; Fri, 10 May 2002 08:30:49 -0700 (PDT)
Received: (from perforce@localhost)
	by freefall.freebsd.org (8.11.6/8.11.6) id g4AFUn510685
	for perforce@freebsd.org; Fri, 10 May 2002 08:30:49 -0700 (PDT)
	(envelope-from mini@freebsd.org)
Date: Fri, 10 May 2002 08:30:49 -0700 (PDT)
Message-Id: <200205101530.g4AFUn510685@freefall.freebsd.org>
X-Authentication-Warning: freefall.freebsd.org: perforce set sender to
	mini@freebsd.org using -f
From: Jonathan Mini
Subject: PERFORCE change 11120 for review
To: Perforce Change Reviews
Sender: owner-p4-projects@FreeBSD.ORG
Precedence: bulk
List-ID:
List-Archive: (Web Archive)
List-Help: (List Instructions)
List-Subscribe:
List-Unsubscribe:
X-Loop: FreeBSD.ORG

http://people.freebsd.org/~peter/p4db/chv.cgi?CH=11120

Change 11120 by mini@mini_stylus on 2002/05/10 08:30:36

	Give UMA control over thread allocation and caching:
	- Use uma init/fini/ctor/dtor functions.
	- Instead of keeping a PCPU list of zombie threads to be reaped at
	  earliest convenience, keep a single "spare" thread in the KSE.
	- When abandoning a thread (in thread_exit()), push the thread into
	  its KSE's spare thread slot, and free the thread that is already
	  there (if any).
	- When performing an upcall, pull the spare thread (if available)
	  before allocating a new thread from uma.  This is especially
	  useful in msleep(), where not blocking again is highly preferable.
	- When pulling the KSE spare thread, allocate a new spare thread for
	  the KSE before returning to userland for the upcall.
	(A standalone sketch of this caching discipline follows the
	kern_synch.c hunk below.)

Affected files ...

... //depot/projects/kse/sys/kern/kern_exit.c#46 edit
... //depot/projects/kse/sys/kern/kern_synch.c#59 edit
... //depot/projects/kse/sys/kern/kern_thread.c#45 edit
... //depot/projects/kse/sys/kern/subr_pcpu.c#7 edit
... //depot/projects/kse/sys/kern/subr_trap.c#53 edit
... //depot/projects/kse/sys/sys/pcpu.h#11 edit
... //depot/projects/kse/sys/sys/proc.h#98 edit

Differences ...

==== //depot/projects/kse/sys/kern/kern_exit.c#46 (text+ko) ====

@@ -600,7 +600,6 @@
 		mtx_unlock_spin(&sched_lock);
 	}
 
-	thread_reap();	/* check for zombie threads */
 	td->td_retval[0] = p->p_pid;
 #ifdef COMPAT_43
 	if (compat)
@@ -699,6 +698,10 @@
 		p->p_procsig = NULL;
 	}
 
+	/* Free the KSE spare thread */
+	if (p->p_kse.ke_tdspare != NULL)
+		thread_free(p->p_kse.ke_tdspare);
+
 	/*
 	 * Give vm and machine-dependent layer a chance
 	 * to free anything that cpu_exit couldn't

==== //depot/projects/kse/sys/kern/kern_synch.c#59 (text+ko) ====

@@ -461,9 +461,11 @@
 		 */
 		if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
 			/* don't recurse here! */
+			mtx_lock_spin(&sched_lock);
 			td->td_flags |= TDF_INMSLEEP;
 			thread_schedule_upcall(td, td->td_kse);
 			td->td_flags &= ~TDF_INMSLEEP;
+			mtx_unlock_spin(&sched_lock);
 		}
 	}
 }
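The whole change reduces to a one-slot per-KSE cache: a push in
thread_exit() and a pull in thread_schedule_upcall().  As a rough
standalone sketch of that discipline (every type and helper below is a
simplified stand-in invented for illustration; only the ke_tdspare
field mirrors the patch, and the real code runs under sched_lock with
UMA-backed storage):

/*
 * Standalone illustration only -- not kernel code.  "thread_alloc"
 * and "thread_free" stand in for the UMA zone; struct kse carries
 * just the one-slot cache.
 */
#include <stdio.h>
#include <stdlib.h>

struct thread {
	int	id;
};

struct kse {
	struct thread	*ke_tdspare;	/* single cached spare thread */
};

static int next_id = 1;

static struct thread *
thread_alloc(void)
{
	struct thread *td = malloc(sizeof(*td));

	td->id = next_id++;
	return (td);
}

static void
thread_free(struct thread *td)
{
	free(td);
}

/*
 * Exit path (cf. thread_exit()): free whatever was cached, then
 * park the dying thread in the spare slot.
 */
static void
exit_push(struct kse *ke, struct thread *td)
{
	if (ke->ke_tdspare != NULL)
		thread_free(ke->ke_tdspare);
	ke->ke_tdspare = td;
}

/*
 * Upcall path (cf. thread_schedule_upcall()): prefer the cached
 * spare so the common case never enters the allocator.
 */
static struct thread *
upcall_pull(struct kse *ke)
{
	struct thread *td;

	if (ke->ke_tdspare != NULL) {
		td = ke->ke_tdspare;
		ke->ke_tdspare = NULL;
	} else
		td = thread_alloc();
	return (td);
}

int
main(void)
{
	struct kse ke = { NULL };
	struct thread *td;

	exit_push(&ke, thread_alloc());	/* thread 1 parked in the slot */
	td = upcall_pull(&ke);		/* reused; no allocator call */
	printf("upcall got thread %d\n", td->id);
	thread_free(td);
	return (0);
}

The single slot is enough because a KSE runs on one processor at a
time: the dying thread parks itself for the next upcall, and the
upcall path stays allocation-free in the common case.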
==== //depot/projects/kse/sys/kern/kern_thread.c#45 (text+ko) ====

@@ -46,6 +46,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,22 +57,12 @@
  * Thread related storage
  */
 static uma_zone_t thread_zone;
-
-/* The cache of available threads */
-static tdlist_head_t free_threads;
-
 static int allocated_threads;
 static int active_threads;
 static int cached_threads;
 
-#define DEFAULT_THREAD_CACHE_SIZE 64;
-static int thread_cache_size = DEFAULT_THREAD_CACHE_SIZE;
-
 SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
 
-SYSCTL_INT(_kern_threads, OID_AUTO, cachesize, CTLFLAG_RW,
-	&thread_cache_size, 0, "Size of thread cache.");
-
 SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD,
 	&active_threads, 0, "Number of active threads in system.");
 
@@ -79,88 +70,182 @@
 	&cached_threads, 0, "Number of threads in thread cache.");
 
 SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD,
-	&allocated_threads, 0, "Number of threads in allocated from zone.");
+	&allocated_threads, 0, "Number of threads in zone.");
 
+#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
 
 /*
- * Initialize global thread allocation resources
+ * Prepare a thread for use.
  */
-void
-threadinit(void)
+static void
+thread_ctor(void *mem, int size, void *arg)
 {
+	struct thread *td;
+
+	KASSERT((size == sizeof(struct thread)),
+	    ("size mismatch: %d != %d\n", size, sizeof(struct thread)));
 
-	thread_zone = uma_zcreate("THREAD", sizeof (struct thread), NULL,
-	    NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
-	TAILQ_INIT(&free_threads);
+	td = (struct thread *)mem;
+	bzero(&td->td_startzero,
+	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
+	td->td_state = TDS_NEW;
+	td->td_flags |= TDF_UNBOUND;
+	cached_threads--;	/* XXXSMP */
+	active_threads++;	/* XXXSMP */
 }
 
-/*
- * Called from thread-reap() to dissociate the zombie threads.
- * We cache it if we need to or if we have enough of them,
- * we can pull it apart and free it.
+/*
+ * Reclaim a thread after use.
  */
-void
-thread_free(struct thread *td)
+static void
+thread_dtor(void *mem, int size, void *arg)
{
+	struct thread *td;
+
+	KASSERT((size == sizeof(struct thread)),
+	    ("size mismatch: %d != %d\n", size, sizeof(struct thread)));
+
+	td = (struct thread *)mem;
 
-#ifdef INVARIANTS
-	if (td == NULL)
-		return;
+#ifdef INVARIANTS
+	/* Verify that this thread is in a safe state to free. */
+	switch (td->td_state) {
+	case TDS_SLP:
+	case TDS_MTX:
+	case TDS_RUNQ:
+		/*
+		 * We must never unlink a thread that is in one of
+		 * these states, because it is currently active.
+		 */
+		panic("bad state for thread unlinking");
+		break;
+	case TDS_UNQUEUED:
+	case TDS_NEW:
+	case TDS_RUNNING:
+	case TDS_SURPLUS:
+		break;
+	default:
+		panic("bad thread state");
+	}
 #endif
-	active_threads--;
-	if (cached_threads < thread_cache_size) {
-		TAILQ_INSERT_HEAD(&free_threads, td, td_plist);
-		cached_threads++;	/* XXXSMP */
-	} else {
-		/* free tha pages from the stack object etc. */
-		allocated_threads--;
-		pmap_dispose_thread(td);
+	mtx_lock_spin(&sched_lock);
+
+	mtx_unlock_spin(&sched_lock);
+
+	/* Update counters. */
+	active_threads--;	/* XXXSMP */
+	cached_threads++;	/* XXXSMP */
+}
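A note on the split above, since it is the crux of handing the cache
to UMA: ctor/dtor run on every uma_zalloc()/uma_zfree(), while the
init/fini pair (next hunk) runs only when an item enters or leaves the
zone's backing store, so items parked in the zone cache keep their
init-time state (here, the kstack set up by pmap_new_thread()).  A
hypothetical zone showing the same four hooks, with the hook
signatures this patch uses -- the widget type, BUFSIZE, the M_TEMP
malloc type, and widget_setup() are all invented for the example:

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

#define	BUFSIZE	128		/* invented for the example */

struct widget {
	char	*w_buf;		/* type-stable: survives cached free/alloc */
	int	w_count;	/* per-use: reset on every allocation */
};

static uma_zone_t widget_zone;

static void
widget_ctor(void *mem, int size, void *arg)
{
	struct widget *w = mem;

	w->w_count = 0;		/* cheap; runs on every uma_zalloc() */
}

static void
widget_dtor(void *mem, int size, void *arg)
{
	/* Runs on every uma_zfree(); a good place for sanity checks. */
}

static void
widget_init(void *mem, int size)
{
	struct widget *w = mem;

	/* Expensive; runs once, when the item is created in the zone. */
	w->w_buf = malloc(BUFSIZE, M_TEMP, M_WAITOK);
}

static void
widget_fini(void *mem, int size)
{
	struct widget *w = mem;

	/* Runs only when the zone releases the item back to the VM. */
	free(w->w_buf, M_TEMP);
}

void
widget_setup(void)
{
	/* Called once at boot; wiring via SYSINIT is omitted here. */
	widget_zone = uma_zcreate("WIDGET", sizeof(struct widget),
	    widget_ctor, widget_dtor, widget_init, widget_fini,
	    UMA_ALIGN_CACHE, 0);
}

In the patch, pmap_new_thread()/cpu_thread_setup() play the role of
widget_init(), and the bzero of td_startzero..td_endzero plays the
role of widget_ctor().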
+/*
+ * Initialize type-stable parts of a thread (when newly created).
+ */
+static void
+thread_init(void *mem, int size)
+{
+	struct thread *td;
+
+	KASSERT((size == sizeof(struct thread)),
+	    ("size mismatch: %d != %d\n", size, sizeof(struct thread)));
+
+	td = (struct thread *)mem;
+	pmap_new_thread(td);
+	cpu_thread_setup(td);
+	cached_threads++;	/* XXXSMP */
+	allocated_threads++;	/* XXXSMP */
+}
+
+/*
+ * Tear down type-stable parts of a thread (just before being discarded).
+ */
+static void
+thread_fini(void *mem, int size)
+{
+	struct thread *td;
+
+	KASSERT((size == sizeof(struct thread)),
+	    ("size mismatch: %d != %d\n", size, sizeof(struct thread)));
+
+	td = (struct thread *)mem;
+	pmap_dispose_thread(td);
+	vm_object_deallocate(td->td_kstack_obj);
+	cached_threads--;	/* XXXSMP */
+	allocated_threads--;	/* XXXSMP */
+}
+
+/*
+ * Unlink thread from its process, and reassign its KSE to another thread.
+ */
+static void
+thread_unlink(struct thread *td)
+{
+	struct proc *p;
+	struct ksegrp *kg;
+	struct kse *ke;
+
+	p = td->td_proc;
+	kg = td->td_ksegrp;
+	ke = td->td_kse;
-#if 0
-	/* Free the object itslef */
-	/* As zones are type stable this can be skipped for now */
-	vm_object_deallocate(td->td_kstack_obj);
-	td->td_kstack_obj = NULL;
-#endif
+	/* Reassign this thread's KSE. */
+	if (ke != NULL) {
+		ke->ke_thread = NULL;
+		td->td_kse = NULL;
+		ke->ke_state = KES_UNQUEUED;
+		kse_reassign(ke);
+	}
 
-	/* put the thread back in the zone */
-	uma_zfree(thread_zone, td);
+	/* Unlink this thread from its proc. */
+	if (p != NULL) {
+		TAILQ_REMOVE(&p->p_threads, td, td_plist);
+		if (kg != NULL)
+			TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
+		p->p_numthreads--;
+		if (P_SHOULDSTOP(p) == P_STOPPED_SNGL) {
+			if (p->p_numthreads ==
+			    ((p->p_flag & P_SINGLE_EXIT) ? 1 : (p->p_suspcount + 1))) {
+				setrunqueue(p->p_singlethread);
+				p->p_singlethread = NULL;
+			}
+		}
 	}
+	if (kg != NULL)
+		kg->kg_numthreads--;
+	td->td_state = TDS_SURPLUS;
+	td->td_proc = NULL;
+	td->td_ksegrp = NULL;
+	td->td_last_kse = NULL;
+}
+
+/*
+ * Initialize global thread allocation resources.
+ */
+void
+threadinit(void)
+{
+
+	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
+	    thread_ctor, thread_dtor, thread_init, thread_fini,
+	    UMA_ALIGN_CACHE, 0);
 }
 
-#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
-/*
- * Try get a new thread from the cache, but if that fails,
- * create one from the zone as per normal
+/*
+ * Allocate a thread.
  */
 struct thread *
 thread_alloc(void)
 {
-	struct thread *td;
+	return (uma_zalloc(thread_zone, M_WAITOK));
+}
 
-	thread_reap();	/* recover any partly deallocated threads */
-	if (cached_threads) {
-		td = TAILQ_FIRST(&free_threads);
-		TAILQ_REMOVE(&free_threads, td, td_plist);
-		cached_threads--;	/* XXXSMP */
-		/* Probably should clean up stuff here */
-	} else {
-		/* allocate the thread structure itself */
-		td = uma_zalloc(thread_zone, M_WAITOK);
-
-		allocated_threads++;
-		pmap_new_thread(td);
-		cpu_thread_setup(td);
-	}
-	/* may need to set some stuff here.. re state? */
-	/* Make sure the zero'd section is in fact zero'd */
-	bzero(&td->td_startzero,
-	    (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
-	td->td_state = TDS_NEW;
-	td->td_flags |= TDF_UNBOUND;
-	active_threads++;
-	return (td);
+/*
+ * Deallocate a thread.
+ */
+void
+thread_free(struct thread *td)
+{
+	uma_zfree(thread_zone, td);
 }
 
 int
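The least obvious part of thread_unlink() above is the
single-threading wakeup test.  In words: the thread that requested
single-threading may continue once every other thread has exited
(P_SINGLE_EXIT, leaving a count of 1) or suspended (requester plus
suspended peers account for every remaining thread).  A self-contained
restatement of just that arithmetic, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct counts {
	int	numthreads;	/* threads still linked to the process */
	int	suspcount;	/* threads currently suspended */
	bool	single_exit;	/* P_SINGLE_EXIT-style request? */
};

/* Mirrors: p_numthreads == (P_SINGLE_EXIT ? 1 : p_suspcount + 1) */
static bool
single_threading_done(const struct counts *c)
{
	return (c->numthreads ==
	    (c->single_exit ? 1 : c->suspcount + 1));
}

int
main(void)
{
	/* 3 threads remain, 2 suspended, requester not demanding exit. */
	struct counts c = { 3, 2, false };

	/* 3 == 2 + 1: the requester can go back on the run queue. */
	printf("done: %d\n", single_threading_done(&c));
	return (0);
}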
@@ -190,22 +275,36 @@
 }
 
-/*
- * Put the half dead thread on a per CPU list of threads that need
- * to be reaped.
+/*
+ * Discard the current thread and exit from its context.
+ *
+ * Because we can't free a thread while we're operating under its context,
+ * push the current thread into our KSE's ke_tdspare slot, freeing the
+ * thread that might be there currently.  Because we know that only this
+ * processor will run our KSE, we needn't worry about someone else grabbing
+ * our context before we do a cpu_throw.
  */
 void
 thread_exit(void)
 {
-	struct thread *td = curthread;
+	struct thread *td;
+	struct kse *ke;
 
+	td = curthread;
+	ke = td->td_kse;
 	CTR1(KTR_PROC, "thread_exit: thread %p", td);
 	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
-	cpu_thread_exit(td);
-	td->td_state = TDS_SURPLUS;
-	td->td_nextzombie = PCPU_GET(freethread);
-	PCPU_SET(freethread, td);
-	thread_unlink(td);	/* reassignes kse detach it from it's process etc. */
+
+	if (ke->ke_tdspare != NULL) {
+		mtx_unlock_spin(&sched_lock);
+		mtx_lock(&Giant);
+		thread_free(ke->ke_tdspare);
+		mtx_unlock(&Giant);
+		mtx_lock_spin(&sched_lock);
+	}
+	cpu_thread_exit(td);	/* XXXSMP */
+	thread_unlink(td);
+	ke->ke_tdspare = td;
 	cpu_throw();
 }
 
@@ -232,71 +331,6 @@
 	td->td_kse = NULL;
 }
 
-
-void
-thread_unlink(struct thread *td)
-{
-	struct proc *p = td->td_proc;
-	struct ksegrp *kg = td->td_ksegrp;
-	struct kse *ke = td->td_kse;	/* may be NULL */
-
-	if (ke) {
-		ke->ke_thread = NULL;
-		td->td_kse = NULL;
-		ke->ke_state = KES_UNQUEUED;
-		kse_reassign(ke);
-	}
-
-	switch(td->td_state) {
-	case TDS_SLP:	/* we must never get to unlink a thread */
-	case TDS_MTX:	/* that is in one of these states as to */
-	case TDS_RUNQ:	/* do so might leak all sorts of stuff */
-		panic ("bad state for thread unlinking");
-		break;
-	case TDS_UNQUEUED:
-	case TDS_NEW:
-		break;
-	case TDS_RUNNING:
-		break;
-	case TDS_SURPLUS:
-		break;
-	default:
-		panic("bad thread state");
-	}
-	TAILQ_REMOVE(&p->p_threads, td, td_plist);
-	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
-	p->p_numthreads--;
-	if (P_SHOULDSTOP(p) == P_STOPPED_SNGL) {
-		if (p->p_numthreads ==
-		    ((p->p_flag & P_SINGLE_EXIT) ? 1 : (p->p_suspcount + 1))){
-			setrunqueue(p->p_singlethread);
-			p->p_singlethread = NULL;
-		}
-	}
-	kg->kg_numthreads--;
-	td->td_state = TDS_SURPLUS;
-	td->td_proc = NULL;
-	td->td_ksegrp = NULL;
-	td->td_last_kse = NULL;
-}
-/*
- * reap any zombie threads for this Processor.
- */
-void
-thread_reap(void)
-{
-	struct thread *td_reaped, *td_next;
-
-	if ((td_reaped = PCPU_GET(freethread))) {
-		PCPU_SET(freethread, NULL);
-		while (td_reaped) {
-			td_next = td_reaped->td_nextzombie;
-			thread_free(td_reaped);
-			td_reaped = td_next;
-		}
-	}
-}
-
 /*
  * set up the upcall pcb in either a given thread or a new one
  * if none given.  Use the upcall for the given KSE
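One pattern worth calling out, shared by thread_exit() above and the
thread_schedule_upcall() hunk below: sched_lock is a spin lock, so
anything that can sleep (thread_free() under Giant, or thread_alloc()
with M_WAITOK) has to happen in a window where the spin lock is
dropped and then retaken.  Distilled into a sketch -- the helper name
is invented, this compiles only in the kernel context of the patch,
and it is not additional patch code:

/*
 * Sketch of the lock dance around the blocking allocation in
 * thread_schedule_upcall() below.
 */
static struct thread *
get_upcall_thread(struct kse *ke)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	if (ke->ke_tdspare != NULL) {
		/* Fast path: consume the spare; no blocking needed. */
		td2 = ke->ke_tdspare;
		ke->ke_tdspare = NULL;
	} else {
		/*
		 * Slow path: thread_alloc() uses M_WAITOK and may
		 * sleep, which is illegal with a spin lock held, so
		 * release sched_lock around the call and retake it.
		 */
		mtx_unlock_spin(&sched_lock);
		td2 = thread_alloc();
		mtx_lock_spin(&sched_lock);
	}
	return (td2);
}

The slow path's unlock/relock window is exactly the cost the
ke_tdspare fast path is there to avoid.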
@@ -307,18 +341,24 @@
 {
 	struct thread *td2;
 
-	td2 = thread_alloc();
-	if (td2) {
-		CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
-		    td, td->td_proc->p_pid, td->td_proc->p_comm);
-		thread_link(td2, ke->ke_ksegrp);
-		cpu_set_upcall(td2, ke->ke_pcb);
-		td2->td_ucred = crhold(td->td_ucred);
-		td2->td_kse = NULL;	/* back as it was */
-		td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
-		td2->td_priority = td->td_priority;
-		setrunqueue(td2);
+	mtx_assert(&sched_lock, MA_OWNED);
+	if (ke->ke_tdspare != NULL) {
+		td2 = ke->ke_tdspare;
+		ke->ke_tdspare = NULL;
+	} else {
+		mtx_unlock_spin(&sched_lock);
+		td2 = thread_alloc();
+		mtx_lock_spin(&sched_lock);
 	}
+	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
+	    td, td->td_proc->p_pid, td->td_proc->p_comm);
+	thread_link(td2, ke->ke_ksegrp);
+	cpu_set_upcall(td2, ke->ke_pcb);
+	td2->td_ucred = crhold(td->td_ucred);
+	td2->td_kse = NULL;	/* back as it was */
+	td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
+	td2->td_priority = td->td_priority;
+	setrunqueue(td2);
 	return (td2);
 }
 
==== //depot/projects/kse/sys/kern/subr_pcpu.c#7 (text+ko) ====

@@ -119,12 +119,6 @@
 		    td->td_proc->p_comm);
 	else
 		db_printf("none\n");
-	db_printf("freethread = ");
-	td = pc->pc_freethread;
-	if (td != NULL)
-		db_printf("%p: next: %p\n", td, td->td_nextzombie);
-	else
-		db_printf("none\n");
 	db_printf("curpcb = %p\n", pc->pc_curpcb);
 	db_printf("fpcurthread = ");
 	td = pc->pc_fpcurthread;

==== //depot/projects/kse/sys/kern/subr_trap.c#53 (text+ko) ====

@@ -116,9 +116,16 @@
 	 * We also need to check to see if we have to exit or wait
 	 * due to a single threading requirement.
 	 */
-	PROC_LOCK(p);
-	thread_suspend_check(0);	/* Can suspend or kill */
-	PROC_UNLOCK(p);
+	if (p->p_flag & P_KSES) {
+		PROC_LOCK(p);
+		thread_suspend_check(0);	/* Can suspend or kill */
+		PROC_UNLOCK(p);
+		if (ke->ke_tdspare == NULL) {
+			mtx_lock(&Giant);
+			ke->ke_tdspare = thread_alloc();
+			mtx_unlock(&Giant);
+		}
+	}
 	if (td->td_flags & TDF_UNBOUND) {
 		/* maybe this should be in a separate function */
 		/*

==== //depot/projects/kse/sys/sys/pcpu.h#11 (text+ko) ====

@@ -53,7 +53,6 @@
 	struct thread	*pc_curthread;		/* Current thread */
 	struct thread	*pc_idlethread;		/* Idle thread */
 	struct thread	*pc_fpcurthread;	/* Fp state owner */
-	struct thread	*pc_freethread;		/* thread we are freeing */
 	struct pcb	*pc_curpcb;		/* Current pcb */
 	struct bintime	pc_switchtime;
 	int		pc_switchticks;
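The subr_trap.c hunk above is the restocking half of the scheme: the
return-to-userland path can safely sleep, so it tops up ke_tdspare
before the next upcall needs it.  Roughly, as a sketch (the helper
name is invented and the surrounding userret() logic is elided):

/*
 * Sketch of the restocking step on return to userland.
 */
static void
kse_restock_spare(struct proc *p, struct kse *ke)
{
	if ((p->p_flag & P_KSES) == 0)
		return;		/* not a KSE-threaded process */
	if (ke->ke_tdspare == NULL) {
		/* Safe to block here; the patch takes Giant first. */
		mtx_lock(&Giant);
		ke->ke_tdspare = thread_alloc();
		mtx_unlock(&Giant);
	}
}

This is what keeps the msleep() upcall path allocation-free in the
common case -- the "not blocking again is highly preferable" property
called out in the change description.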
==== //depot/projects/kse/sys/sys/proc.h#98 (text+ko) ====

@@ -270,7 +270,6 @@
 	int		td_flags;	/* (j) TDF_* flags. */
 	struct kse	*td_last_kse;	/* Where it wants to be if possible. */
 	struct kse	*td_kse;	/* Current KSE if running. */
-	struct thread	*td_nextzombie;	/* PCPU chain of zombie threads */
 	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
 	void		*td_wchan;	/* (j) Sleep address. */
 	const char	*td_wmesg;	/* (j) Reason for sleep. */
@@ -360,6 +359,7 @@
 		KES_RUNNING
 	} ke_state;	/* (j) S* process status. */
 	void		*ke_mailbox;	/* the userland mailbox address */
+	struct thread	*ke_tdspare;	/* spare thread for upcalls */
 #define	ke_endzero ke_dummy
 
 #define	ke_startcopy ke_endzero
@@ -797,8 +797,8 @@
 int	cpu_coredump(struct thread *, struct vnode *, struct ucred *);
 
 /* new in KSE */
-struct thread *thread_alloc(void);
-void thread_free(struct thread *td);
+struct thread *thread_alloc(void);
+void	thread_free(struct thread *td);
 int	cpu_export_context(struct thread *td);
 void	cpu_free_kse_mdstorage(struct kse *kse);
 void	cpu_save_upcall(struct thread *td, struct kse *newkse);
@@ -814,14 +814,12 @@
 void	thread_exit(void) __dead2;
 int	thread_export_context(struct thread *td);
 void	thread_link(struct thread *td, struct ksegrp *kg);
-void	thread_reap(void);
 struct thread *thread_schedule_upcall(struct thread *td, struct kse *ke);
 int	thread_single(int how);
 #define	SNGLE_WAIT	0	/* values for 'how' */
 #define	SNGLE_EXIT	1
 void	thread_single_end(void);
 int	thread_suspend_check(int how);
-void	thread_unlink(struct thread *td);
 void	thread_unsuspend(struct proc *p);
 #endif	/* _KERNEL */

To Unsubscribe: send mail to majordomo@FreeBSD.org
with "unsubscribe p4-projects" in the body of the message