Sorry folks; I accidentally committed a patch from what I was working
on a couple of days ago.  These should be the most recent changes.

Noticed by:	davidxu
Daniel Eischen 2003-04-18 07:09:43 +00:00
parent 7cd650a972
commit e4c2ac1637
36 changed files with 637 additions and 403 deletions

View File

@ -25,7 +25,7 @@ CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
.include "${.CURDIR}/man/Makefile.inc"
#.include "${.CURDIR}/man/Makefile.inc"
.include "${.CURDIR}/thread/Makefile.inc"
.include "${.CURDIR}/sys/Makefile.inc"

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
* All rights reserved.
*
@ -7,9 +7,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

View File

@ -31,12 +31,9 @@
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <sys/kse.h>
#include <setjmp.h>
#include <ucontext.h>
extern int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *);
extern int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
extern int _thr_setcontext(ucontext_t *);
extern int _thr_getcontext(ucontext_t *);
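For reference: judging by their signatures, the new _thr_setcontext()/_thr_getcontext() entry points mirror the standard ucontext(3) calls (presumably as machine-dependent assembly, since they are declared in the arch-specific pthread_md.h). A minimal, self-contained sketch of user-space context switching with the portable getcontext/makecontext/swapcontext API; this illustrates the mechanism only and is not the library's implementation:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, work_ctx;
static char work_stack[32 * 1024];

static void
worker(void)
{
	printf("running on the worker context\n");
	/* Resume main_ctx exactly where its swapcontext() left off. */
	swapcontext(&work_ctx, &main_ctx);
}

int
main(void)
{
	getcontext(&work_ctx);
	work_ctx.uc_stack.ss_sp = work_stack;
	work_ctx.uc_stack.ss_size = sizeof(work_stack);
	work_ctx.uc_link = &main_ctx;		/* fallback if worker returns */
	makecontext(&work_ctx, worker, 0);
	swapcontext(&main_ctx, &work_ctx);	/* save main, run worker */
	printf("back on the main context\n");
	return (0);
}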

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
* Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
* Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -107,7 +108,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
curkse = curthread->kse;
/* Allocate memory for the thread structure: */
if ((new_thread = _thr_alloc(curkse)) == NULL) {
if ((new_thread = _thr_alloc(curthread)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
@ -124,21 +125,21 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curkse, new_thread);
_thr_free(curthread, new_thread);
}
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curkse)) == NULL)
|| ((kseg = _kseg_alloc(curkse)) == NULL))) {
(((kse = _kse_alloc(curthread)) == NULL)
|| ((kseg = _kseg_alloc(curthread)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
if (kse != NULL)
_kse_free(curkse, kse);
_kse_free(curthread, kse);
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
}
_thr_free(curkse, new_thread);
_thr_free(curthread, new_thread);
}
else {
if (kseg != NULL) {
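Note the rollback order in the error paths above: every resource acquired before the failing step is released before EAGAIN is returned. The same pattern in a generic, self-contained sketch (pool_alloc(), pool_free(), and create_pair() are hypothetical stand-ins, not library functions):

#include <errno.h>
#include <stddef.h>

struct resource;
struct resource	*pool_alloc(void);	/* hypothetical pool allocator */
void		 pool_free(struct resource *);

int
create_pair(struct resource **ap, struct resource **bp)
{
	struct resource *a, *b;

	if ((a = pool_alloc()) == NULL)
		return (EAGAIN);	/* nothing to undo yet */
	if ((b = pool_alloc()) == NULL) {
		pool_free(a);		/* roll back the earlier success */
		return (EAGAIN);
	}
	*ap = a;
	*bp = b;
	return (0);
}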

View File

@ -42,7 +42,10 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
struct pthread *curthread, *joiner;
struct pthread *curthread = _get_curthread();
struct pthread *joiner;
kse_critical_t crit;
int dead;
int rval = 0;
/* Check for invalid calling parameters: */
@ -50,13 +53,19 @@ _pthread_detach(pthread_t pthread)
/* Return an invalid argument error: */
rval = EINVAL;
/* Check if the thread is already detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
else if ((rval = _thr_ref_add(curthread, pthread,
/*include dead*/1)) != 0) {
/* Return an error: */
_thr_leave_cancellation_point(curthread);
}
/* Check if the thread is already detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
/* Return an error: */
_thr_ref_delete(curthread, pthread);
rval = EINVAL;
else {
} else {
/* Lock the detached thread: */
curthread = _get_curthread();
THR_SCHED_LOCK(curthread, pthread);
/* Flag the thread as detached: */
@ -65,20 +74,35 @@ _pthread_detach(pthread_t pthread)
/* Retrieve any joining thread and remove it: */
joiner = pthread->joiner;
pthread->joiner = NULL;
if (joiner->kseg == pthread->kseg) {
/*
* We already own the scheduler lock for the joiner.
* Take advantage of that and make the joiner runnable.
*/
if (joiner->join_status.thread == pthread) {
/*
* Set the return value for the woken thread:
*/
joiner->join_status.error = ESRCH;
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
/* We are already in a critical region. */
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
THR_LIST_REMOVE(pthread);
THR_GCLIST_ADD(pthread);
atomic_store_rel_int(&_gc_check, 1);
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
_thr_setrunnable_unlocked(joiner);
}
joiner = NULL;
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0;
THR_SCHED_UNLOCK(curthread, pthread);
if (dead != 0) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
THR_GCLIST_ADD(pthread);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
_thr_ref_delete(curthread, pthread);
/* See if there is a thread waiting in pthread_join(): */
if (joiner != NULL) {
/* Lock the joiner before fiddling with it. */
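The new code here settles into the locking discipline used throughout this commit: enter a KSE critical region first (so the current KSE cannot be preempted into the scheduler while holding a low-level lock), take the thread-list lock second, and release in reverse order. In skeleton form, using the library's own names:

	kse_critical_t crit;

	crit = _kse_critical_enter();		/* defer upcalls first */
	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
	/* ... manipulate _thread_list / the GC list ... */
	KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
	_kse_critical_leave(crit);		/* re-enable upcalls last */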

View File

@ -85,15 +85,6 @@ _thr_ref_delete(struct pthread *curthread, struct pthread *thread)
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
thread->refcount--;
curthread->critical_count--;
if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
(thread->refcount == 0) &&
((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
THR_LIST_REMOVE(thread);
THR_GCLIST_ADD(thread);
_gc_check = 1;
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@FreeBSD.org>
* Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -280,7 +280,7 @@ _libpthread_init(struct pthread *curthread)
*/
_thr_initial = curthread;
}
_kse_initial->k_kseg->kg_threadcount = 1;
_kse_initial->k_kseg->kg_threadcount = 0;
_thr_initial->kse = _kse_initial;
_thr_initial->kseg = _kse_initial->k_kseg;
_thr_initial->active = 1;
@ -290,7 +290,7 @@ _libpthread_init(struct pthread *curthread)
* queue.
*/
THR_LIST_ADD(_thr_initial);
TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle);
KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial);
/* Setup the KSE/thread specific data for the current KSE/thread. */
if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0)

View File

@ -40,8 +40,9 @@ __weak_reference(_pthread_join, pthread_join);
int
_pthread_join(pthread_t pthread, void **thread_return)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
struct pthread *curthread = _get_curthread();
kse_critical_t crit;
int ret = 0;
_thr_enter_cancellation_point(curthread);
@ -83,8 +84,24 @@ _pthread_join(pthread_t pthread, void **thread_return)
/* Return the thread's return value: */
*thread_return = pthread->ret;
/* Unlock the thread and remove the reference. */
/* Detach the thread. */
pthread->attr.flags |= PTHREAD_DETACHED;
/* Unlock the thread. */
THR_SCHED_UNLOCK(curthread, pthread);
/*
* Remove the thread from the list of active
* threads and add it to the GC list.
*/
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
THR_LIST_REMOVE(pthread);
THR_GCLIST_ADD(pthread);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
/* Remove the reference. */
_thr_ref_delete(curthread, pthread);
}
else if (pthread->joiner != NULL) {

View File

@ -83,16 +83,6 @@ __FBSDID("$FreeBSD$");
#define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED
/*
* Add/remove threads from a KSE's scheduling queue.
* For now the scheduling queue is hung off the KSEG.
*/
#define KSEG_THRQ_ADD(kseg, thr) \
TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle)
#define KSEG_THRQ_REMOVE(kseg, thr) \
TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle)
/*
* Macros for manipulating the run queues. The priority queue
* routines use the thread's pqe link and also handle the setting
@ -116,6 +106,7 @@ static TAILQ_HEAD(, kse) active_kseq;
static TAILQ_HEAD(, kse) free_kseq;
static TAILQ_HEAD(, kse_group) free_kse_groupq;
static TAILQ_HEAD(, kse_group) active_kse_groupq;
static TAILQ_HEAD(, kse_group) gc_ksegq;
static struct lock kse_lock; /* also used for kseg queue */
static int free_kse_count = 0;
static int free_kseg_count = 0;
@ -135,13 +126,15 @@ static void kse_sched_multi(struct kse *curkse);
static void kse_sched_single(struct kse *curkse);
static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
static void kse_wait(struct kse *kse);
static void kse_free_unlocked(struct kse *kse);
static void kseg_free(struct kse_group *kseg);
static void kseg_init(struct kse_group *kseg);
static void kse_waitq_insert(struct pthread *thread);
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
static void thr_gc(struct kse *curkse);
#ifdef NOT_YET
static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
ucontext_t *ucp);
#endif
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
@ -232,6 +225,7 @@ _kse_single_thread(struct pthread *curthread)
while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
}
free_kseg_count = 0;
@ -242,6 +236,7 @@ _kse_single_thread(struct pthread *curthread)
kseg_next = TAILQ_NEXT(kseg, kg_qe);
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
}
active_kseg_count = 0;
@ -261,9 +256,11 @@ _kse_single_thread(struct pthread *curthread)
/* Free the to-be-gc'd threads. */
while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
TAILQ_REMOVE(&_thread_gc_list, thread, tle);
TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
free(thread);
}
TAILQ_INIT(&gc_ksegq);
_gc_count = 0;
if (inited != 0) {
/*
@ -309,6 +306,7 @@ _kse_init(void)
TAILQ_INIT(&free_kseq);
TAILQ_INIT(&free_kse_groupq);
TAILQ_INIT(&free_threadq);
TAILQ_INIT(&gc_ksegq);
if (_lock_init(&kse_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Unable to initialize free KSE queue lock");
@ -320,6 +318,7 @@ _kse_init(void)
PANIC("Unable to initialize thread list lock");
active_kse_count = 0;
active_kseg_count = 0;
_gc_count = 0;
inited = 1;
}
}
@ -766,10 +765,6 @@ kse_sched_multi(struct kse *curkse)
/* This has to be done without the scheduling lock held. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
kse_check_signals(curkse);
/* Check for GC: */
if (_gc_check != 0)
thr_gc(curkse);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
dump_queues(curkse);
@ -785,8 +780,6 @@ kse_sched_multi(struct kse *curkse)
kse_check_waitq(curkse);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
kse_check_signals(curkse);
if (_gc_check != 0)
thr_gc(curkse);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
@ -853,7 +846,7 @@ kse_sched_multi(struct kse *curkse)
* signals or needs a cancellation check, we need to add a
* signal frame to the thread's context.
*/
#if 0
#ifdef NOT_YET
if ((curframe == NULL) && ((curthread->check_pending != 0) ||
(((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) {
@ -904,6 +897,7 @@ kse_check_signals(struct kse *curkse)
}
}
#ifdef NOT_YET
static void
thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
{
@ -911,6 +905,7 @@ thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
thr_resume_check(curthread, ucp, NULL);
}
#endif
static void
thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
@ -944,7 +939,6 @@ static void
thr_cleanup(struct kse *curkse, struct pthread *thread)
{
struct pthread *joiner;
int free_thread = 0;
if ((joiner = thread->joiner) != NULL) {
thread->joiner = NULL;
@ -969,71 +963,81 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
thread->attr.flags |= PTHREAD_DETACHED;
}
thread->flags |= THR_FLAGS_GC_SAFE;
thread->kseg->kg_threadcount--;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&thread->attr);
if ((thread->attr.flags & PTHREAD_DETACHED) != 0) {
/* Remove this thread from the list of all threads: */
THR_LIST_REMOVE(thread);
if (thread->refcount == 0) {
THR_GCLIST_REMOVE(thread);
TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle);
free_thread = 1;
}
if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
/*
* Remove the thread from the KSEG's list of threads.
*/
KSEG_THRQ_REMOVE(thread->kseg, thread);
/*
* Migrate the thread to the main KSE so that this
* KSE and KSEG can be cleaned when their last thread
* exits.
*/
thread->kseg = _kse_initial->k_kseg;
thread->kse = _kse_initial;
}
thread->flags |= THR_FLAGS_GC_SAFE;
/*
* We can't hold the thread list lock while holding the
* scheduler lock.
*/
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
DBG_MSG("Adding thread %p to GC list\n", thread);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_GCLIST_ADD(thread);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (free_thread != 0)
_thr_free(curkse, thread);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
void
thr_gc(struct pthread *curthread)
_thr_gc(struct pthread *curthread)
{
struct pthread *td, *joiner;
struct kse_group *free_kseg;
struct pthread *td, *td_next;
kse_critical_t crit;
int clean;
_gc_check = 0;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
/* Check the threads waiting for GC. */
for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
td_next = TAILQ_NEXT(td, gcle);
if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
continue;
#ifdef NOT_YET
else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
(td->kse->k_mbx.km_flags == 0)) {
/*
* The thread and KSE are operating on the same
* stack. Wait for the KSE to exit before freeing
* the thread's stack as well as everything else.
*/
continue;
}
#endif
THR_GCLIST_REMOVE(td);
clean = (td->attr.flags & PTHREAD_DETACHED) != 0;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
clean = ((td->attr.flags & PTHREAD_DETACHED) != 0) &&
(td->refcount == 0);
_thr_stack_free(&td->attr);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
DBG_MSG("Found thread %p in GC list, clean? %d\n", td, clean);
KSE_SCHED_LOCK(curkse, td->kseg);
TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle);
if (TAILQ_EMPTY(&td->kseg->kg_threadq))
free_kseg = td->kseg;
else
free_kseg = NULL;
joiner = NULL;
if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) &&
(td->joiner->join_status.thread == td)) {
joiner = td->joiner;
joiner->join_status.thread = NULL;
/* Set the return status for the joining thread: */
joiner->join_status.ret = td->ret;
/* Make the thread runnable. */
if (td->kseg == joiner->kseg) {
_thr_setrunnable_unlocked(joiner);
joiner = NULL;
}
if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
kse_free_unlocked(td->kse);
kseg_free(td->kseg);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
}
td->joiner = NULL;
KSE_SCHED_UNLOCK(curkse, td->kseg);
if (free_kseg != NULL)
kseg_free(free_kseg);
if (joiner != NULL) {
KSE_SCHED_LOCK(curkse, joiner->kseg);
_thr_setrunnable_unlocked(joiner);
KSE_SCHED_UNLOCK(curkse, joiner->kseg);
if (clean != 0) {
_kse_critical_leave(crit);
_thr_free(curthread, td);
crit = _kse_critical_enter();
}
_thr_free(curkse, td);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -1402,11 +1406,33 @@ static void
kse_fini(struct kse *kse)
{
struct timespec ts;
struct kse_group *free_kseg = NULL;
if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
kse_exit();
/*
* Check to see if this is the main kse.
* Check to see if this is one of the main kses.
*/
if (kse == _kse_initial) {
else if (kse->k_kseg != _kse_initial->k_kseg) {
/* Remove this KSE from the KSEG's list of KSEs. */
KSE_SCHED_LOCK(kse, kse->k_kseg);
TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
free_kseg = kse->k_kseg;
KSE_SCHED_UNLOCK(kse, kse->k_kseg);
/*
* Add this KSE to the list of free KSEs along with
* the KSEG if it is now orphaned.
*/
KSE_LOCK_ACQUIRE(kse, &kse_lock);
if (free_kseg != NULL)
kseg_free(free_kseg);
kse_free_unlocked(kse);
KSE_LOCK_RELEASE(kse, &kse_lock);
kse_exit();
/* Never returns. */
} else {
/*
* Wait for the last KSE/thread to exit, or for more
* threads to be created (it is possible for additional
@ -1435,12 +1461,6 @@ kse_fini(struct kse *kse)
__isthreaded = 0;
exit(0);
}
} else {
/* Mark this KSE for GC: */
KSE_LOCK_ACQUIRE(kse, &_thread_list_lock);
TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe);
KSE_LOCK_RELEASE(kse, &_thread_list_lock);
kse_exit();
}
}
@ -1580,25 +1600,28 @@ _set_curkse(struct kse *kse)
/*
* Allocate a new KSEG.
*
* We allow the current KSE (curkse) to be NULL in the case that this
* We allow the current thread to be NULL in the case that this
* is the first time a KSEG is being created (library initialization).
* In this case, we don't need to (and can't) take any locks.
*/
struct kse_group *
_kseg_alloc(struct kse *curkse)
_kseg_alloc(struct pthread *curthread)
{
struct kse_group *kseg = NULL;
kse_critical_t crit;
if ((curkse != NULL) && (free_kseg_count > 0)) {
if ((curthread != NULL) && (free_kseg_count > 0)) {
/* Use the kse lock for the kseg queue. */
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
free_kseg_count--;
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
}
KSE_LOCK_RELEASE(curkse, &kse_lock);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
/*
@ -1608,15 +1631,27 @@ _kseg_alloc(struct kse *curkse)
*/
if ((kseg == NULL) &&
((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) {
THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq,
THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0,
"Unable to allocate priority queue.");
kseg_init(kseg);
if (curkse != NULL)
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
kseg_free(kseg);
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (_pq_alloc(&kseg->kg_schedq.sq_runq,
THR_MIN_PRIORITY, THR_LAST_PRIORITY) != 0) {
free(kseg);
kseg = NULL;
} else {
kseg_init(kseg);
/* Add the KSEG to the list of active KSEGs. */
if (curthread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq,
kseg, kg_qe);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
} else {
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq,
kseg, kg_qe);
}
}
}
return (kseg);
}
@ -1628,6 +1663,7 @@ _kseg_alloc(struct kse *curkse)
static void
kseg_free(struct kse_group *kseg)
{
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
kseg_init(kseg);
free_kseg_count++;
@ -1637,19 +1673,21 @@ kseg_free(struct kse_group *kseg)
/*
* Allocate a new KSE.
*
* We allow the current KSE (curkse) to be NULL in the case that this
* We allow the current thread to be NULL in the case that this
* is the first time a KSE is being created (library initialization).
* In this case, we don't need to (and can't) take any locks.
*/
struct kse *
_kse_alloc(struct kse *curkse)
_kse_alloc(struct pthread *curthread)
{
struct kse *kse = NULL;
kse_critical_t crit;
int need_ksd = 0;
int i;
if ((curkse != NULL) && (free_kse_count > 0)) {
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
if ((curthread != NULL) && (free_kse_count > 0)) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
/* Search for a finished KSE. */
kse = TAILQ_FIRST(&free_kseq);
#define KEMBX_DONE 0x01
@ -1664,7 +1702,8 @@ _kse_alloc(struct kse *curkse)
active_kse_count++;
TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
}
KSE_LOCK_RELEASE(curkse, &kse_lock);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
if ((kse == NULL) &&
((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
@ -1700,12 +1739,16 @@ _kse_alloc(struct kse *curkse)
}
if ((kse != NULL) && (need_ksd != 0)) {
/* This KSE needs initialization. */
if (curkse != NULL)
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
if (curthread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
}
/* Initialize KSD inside of the lock. */
if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (curthread != NULL) {
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
free(kse->k_mbx.km_stack.ss_sp);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
@ -1716,36 +1759,38 @@ _kse_alloc(struct kse *curkse)
kse->k_flags = 0;
active_kse_count++;
TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (curthread != NULL) {
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
}
return (kse);
}
void
_kse_free(struct kse *curkse, struct kse *kse)
kse_free_unlocked(struct kse *kse)
{
struct kse_group *kseg = NULL;
if (curkse == kse)
PANIC("KSE trying to free itself");
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
active_kse_count--;
if ((kseg = kse->k_kseg) != NULL) {
TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe);
/*
* Free the KSEG if there are no more threads associated
* with it.
*/
if (TAILQ_EMPTY(&kseg->kg_threadq))
kseg_free(kseg);
}
kse->k_kseg = NULL;
kse->k_flags &= ~KF_INITIALIZED;
TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
free_kse_count++;
KSE_LOCK_RELEASE(curkse, &kse_lock);
}
void
_kse_free(struct pthread *curthread, struct kse *kse)
{
kse_critical_t crit;
if (curthread == NULL)
kse_free_unlocked(kse);
else {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
kse_free_unlocked(kse);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
}
static void
@ -1754,7 +1799,6 @@ kseg_init(struct kse_group *kseg)
TAILQ_INIT(&kseg->kg_kseq);
TAILQ_INIT(&kseg->kg_threadq);
TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
TAILQ_INIT(&kseg->kg_schedq.sq_blockedq);
_lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
_kse_lock_wakeup);
kseg->kg_threadcount = 0;
@ -1769,16 +1813,16 @@ _thr_alloc(struct pthread *curthread)
struct pthread *thread = NULL;
if (curthread != NULL) {
if (_gc_check != 0)
thread_gc(curthread);
if (GC_NEEDED())
_thr_gc(curthread);
if (free_thread_count > 0) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &thread_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
TAILQ_REMOVE(&free_threadq, thread, tle);
free_thread_count--;
}
KSE_LOCK_RELEASE(curkse, &thread_lock);
KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
}
}
if (thread == NULL)
@ -1791,14 +1835,16 @@ _thr_free(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
DBG_MSG("Freeing thread %p\n", thread);
if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS))
free(thread);
else {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &thread_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
THR_LIST_REMOVE(thread);
TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
free_thread_count++;
KSE_LOCK_RELEASE(curkse, &thread_lock);
KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
_kse_critical_leave(crit);
}
}
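All of the queues shuffled above (free_kseq, free_kse_groupq, gc_ksegq, _thread_gc_list) are sys/queue.h tail queues, and one structure can sit on several of them at once because each queue uses its own embedded link field; that is why threads gain a gcle link alongside tle. A minimal, self-contained sketch with a hypothetical element type:

#include <sys/queue.h>
#include <stdlib.h>

struct item {
	int			value;
	TAILQ_ENTRY(item)	link;	/* embedded linkage, like tle/gcle */
};

static TAILQ_HEAD(, item) itemq = TAILQ_HEAD_INITIALIZER(itemq);

static void
itemq_demo(void)
{
	struct item *it, *it_next;

	if ((it = malloc(sizeof(*it))) == NULL)
		return;
	it->value = 1;
	TAILQ_INSERT_TAIL(&itemq, it, link);

	/* Removal-safe walk, the same shape as _thr_gc()'s td/td_next loop. */
	for (it = TAILQ_FIRST(&itemq); it != NULL; it = it_next) {
		it_next = TAILQ_NEXT(it, link);
		TAILQ_REMOVE(&itemq, it, link);
		free(it);
	}
}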

View File

@ -101,6 +101,13 @@ _pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
return (ret);
}
void
_pq_free(pq_queue_t *pq)
{
if ((pq != NULL) && (pq->pq_lists != NULL))
free(pq->pq_lists);
}
int
_pq_init(pq_queue_t *pq)
{

View File

@ -153,7 +153,6 @@ typedef struct pq_queue {
struct sched_queue {
pq_queue_t sq_runq;
TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */
};
/* Used to maintain pending and active signals: */
@ -180,7 +179,8 @@ struct kse {
struct kse_group *k_kseg; /* parent KSEG */
struct sched_queue *k_schedq; /* scheduling queue */
/* -- end of location and order specific items -- */
TAILQ_ENTRY(kse) k_qe; /* link entry */
TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */
TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */
struct ksd k_ksd; /* KSE specific data */
/*
* Items that are only modified by the kse, or that otherwise
@ -220,6 +220,23 @@ struct kse_group {
#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
};
/*
* Add/remove threads from a KSE's scheduling queue.
* For now the scheduling queue is hung off the KSEG.
*/
#define KSEG_THRQ_ADD(kseg, thr) \
do { \
TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
(kseg)->kg_threadcount++; \
} while (0)
#define KSEG_THRQ_REMOVE(kseg, thr) \
do { \
TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \
(kseg)->kg_threadcount--; \
} while (0)
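These wrappers keep kg_threadcount in step with the queue itself, which is why _libpthread_init() now starts the count at 0 and lets KSEG_THRQ_ADD() account for the initial thread. The do { ... } while (0) wrapper is the standard idiom for making a multi-statement macro behave as a single statement; a small illustration with hypothetical names:

#define THRQ_ADD_UNSAFE(q, item)	queue_insert(q, item); (q)->count++

#define THRQ_ADD(q, item)						\
do {									\
	queue_insert(q, item);						\
	(q)->count++;							\
} while (0)

	if (want_insert)
		THRQ_ADD_UNSAFE(q, item);	/* bug: count++ runs even
						   when want_insert is false */
	if (want_insert)
		THRQ_ADD(q, item);		/* both statements guarded */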
/*
* Lock acquire and release for KSEs.
*/
@ -860,17 +877,21 @@ do { \
} while (0)
#define THR_GCLIST_ADD(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \
TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \
TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
(thrd)->flags |= THR_FLAGS_IN_GCLIST; \
_gc_count++; \
} \
} while (0)
#define THR_GCLIST_REMOVE(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \
TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \
TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
(thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \
_gc_count--; \
} \
} while (0)
#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5)
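GC_NEEDED()'s acquire load is meant to pair with a release store on the producer side (see atomic_store_rel_int(&_gc_check, 1) in the pthread_detach() hunk above): the release publishes the list manipulation before the flag is raised, and the acquire guarantees the collector sees that manipulation once it sees the flag. A sketch of the pairing with FreeBSD's atomic(9) primitives:

#include <sys/types.h>
#include <machine/atomic.h>

static volatile u_int gc_flag;		/* stand-in for _gc_check/_gc_count */

static void
gc_producer(void)
{
	/* ... link the dead thread onto the GC list ... */
	atomic_store_rel_int(&gc_flag, 1);	/* release: publish the link */
}

static int
gc_wanted(void)
{
	/* Acquire: if we see the flag, we also see the list update. */
	return (atomic_load_acq_int(&gc_flag) != 0);
}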
/*
* Locking the scheduling queue for another thread uses that thread's
* KSEG lock.
@ -965,7 +986,7 @@ SCLASS pid_t _thr_pid SCLASS_PRESET(0);
/* Garbage collector lock. */
SCLASS struct lock _gc_lock;
SCLASS int _gc_check SCLASS_PRESET(0);
SCLASS pthread_t _gc_thread;
SCLASS int _gc_count SCLASS_PRESET(0);
SCLASS struct lock _mutex_static_lock;
SCLASS struct lock _rwlock_static_lock;
@ -990,12 +1011,12 @@ void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
struct kse *_get_curkse(void);
void _set_curkse(struct kse *);
struct kse *_kse_alloc(struct kse *);
struct kse *_kse_alloc(struct pthread *);
kse_critical_t _kse_critical_enter(void);
void _kse_critical_leave(kse_critical_t);
void _kse_free(struct kse *, struct kse *);
void _kse_free(struct pthread *, struct kse *);
void _kse_init();
struct kse_group *_kseg_alloc(struct kse *);
struct kse_group *_kseg_alloc(struct pthread *);
void _kse_lock_wait(struct lock *, struct lockuser *lu);
void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
void _kse_sig_check_pending(struct kse *);
@ -1011,6 +1032,7 @@ int _mutex_reinit(struct pthread_mutex *);
void _mutex_unlock_private(struct pthread *);
void _libpthread_init(struct pthread *);
int _pq_alloc(struct pq_queue *, int, int);
void _pq_free(struct pq_queue *);
int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
@ -1030,7 +1052,9 @@ int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
struct pthread *_pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
struct pthread *_thr_alloc(struct kse *);
struct pthread *_thr_alloc(struct pthread *);
int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *);
int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
void _thr_exit(char *, int, char *);
void _thr_exit_cleanup(void);
void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
@ -1046,7 +1070,8 @@ void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
int _thr_stack_alloc(struct pthread_attr *);
void _thr_stack_free(struct pthread_attr *);
void _thr_exit_cleanup(void);
void _thr_free(struct kse *, struct pthread *);
void _thr_free(struct pthread *, struct pthread *);
void _thr_gc(struct pthread *);
void _thr_panic_exit(char *, int, char *);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);

View File

@ -55,7 +55,10 @@ _pthread_resume_np(pthread_t thread)
/* Lock the thread's scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
if ((thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0))
resume_common(thread);
/* Unlock the thread's scheduling queue: */
THR_SCHED_UNLOCK(curthread, thread);

View File

@ -64,6 +64,13 @@ _pthread_setschedparam(pthread_t pthread, int policy,
* its priority:
*/
THR_SCHED_LOCK(curthread, pthread);
if ((pthread->state == PS_DEAD) ||
(pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0)) {
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
return (ESRCH);
}
in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ;
/* Set the scheduling policy: */

View File

@ -56,9 +56,7 @@ _pthread_suspend_np(pthread_t thread)
== 0) {
/* Lock the thread's scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
/* Unlock the thread's scheduling queue: */
THR_SCHED_UNLOCK(curthread, thread);
@ -80,10 +78,7 @@ _pthread_suspend_all_np(void)
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if ((thread != curthread) &&
(thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0)) {
if (thread != curthread) {
THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
@ -98,9 +93,13 @@ _pthread_suspend_all_np(void)
void
suspend_common(struct pthread *thread)
{
thread->flags |= THR_FLAGS_SUSPENDED;
if (thread->flags & THR_FLAGS_IN_RUNQ) {
THR_RUNQ_REMOVE(thread);
THR_SET_STATE(thread, PS_SUSPENDED);
if ((thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0)) {
thread->flags |= THR_FLAGS_SUSPENDED;
if ((thread->flags & THR_FLAGS_IN_RUNQ) != 0) {
THR_RUNQ_REMOVE(thread);
THR_SET_STATE(thread, PS_SUSPENDED);
}
}
}

View File

@ -25,7 +25,7 @@ CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
AINC= -I${.CURDIR}/../libc/${MACHINE_ARCH} -I${.CURDIR}/thread
PRECIOUSLIB= yes
.include "${.CURDIR}/man/Makefile.inc"
#.include "${.CURDIR}/man/Makefile.inc"
.include "${.CURDIR}/thread/Makefile.inc"
.include "${.CURDIR}/sys/Makefile.inc"

View File

@ -1,6 +1,6 @@
/*-
* Copyright (C) 2003 David Xu <davidxu@freebsd.org>
* Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -8,9 +8,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*

View File

@ -1,4 +1,4 @@
/*
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>
* All rights reserved.
*
@ -7,9 +7,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

View File

@ -8,9 +8,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

View File

@ -31,12 +31,9 @@
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <sys/kse.h>
#include <setjmp.h>
#include <ucontext.h>
extern int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *);
extern int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
extern int _thr_setcontext(ucontext_t *);
extern int _thr_getcontext(ucontext_t *);

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
* Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001 Daniel Eischen <deischen@FreeBSD.org>.
* Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -107,7 +108,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
curkse = curthread->kse;
/* Allocate memory for the thread structure: */
if ((new_thread = _thr_alloc(curkse)) == NULL) {
if ((new_thread = _thr_alloc(curthread)) == NULL) {
/* Insufficient memory to create a thread: */
ret = EAGAIN;
} else {
@ -124,21 +125,21 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
ret = EAGAIN;
_thr_free(curkse, new_thread);
_thr_free(curthread, new_thread);
}
else if (((new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
(((kse = _kse_alloc(curkse)) == NULL)
|| ((kseg = _kseg_alloc(curkse)) == NULL))) {
(((kse = _kse_alloc(curthread)) == NULL)
|| ((kseg = _kseg_alloc(curthread)) == NULL))) {
/* Insufficient memory to create a new KSE/KSEG: */
ret = EAGAIN;
if (kse != NULL)
_kse_free(curkse, kse);
_kse_free(curthread, kse);
if ((new_thread->attr.flags & THR_STACK_USER) == 0) {
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&new_thread->attr);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
}
_thr_free(curkse, new_thread);
_thr_free(curthread, new_thread);
}
else {
if (kseg != NULL) {

View File

@ -42,7 +42,10 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
struct pthread *curthread, *joiner;
struct pthread *curthread = _get_curthread();
struct pthread *joiner;
kse_critical_t crit;
int dead;
int rval = 0;
/* Check for invalid calling parameters: */
@ -50,13 +53,19 @@ _pthread_detach(pthread_t pthread)
/* Return an invalid argument error: */
rval = EINVAL;
/* Check if the thread is already detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0)
else if ((rval = _thr_ref_add(curthread, pthread,
/*include dead*/1)) != 0) {
/* Return an error: */
_thr_leave_cancellation_point(curthread);
}
/* Check if the thread is already detached: */
else if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
/* Return an error: */
_thr_ref_delete(curthread, pthread);
rval = EINVAL;
else {
} else {
/* Lock the detached thread: */
curthread = _get_curthread();
THR_SCHED_LOCK(curthread, pthread);
/* Flag the thread as detached: */
@ -65,20 +74,35 @@ _pthread_detach(pthread_t pthread)
/* Retrieve any joining thread and remove it: */
joiner = pthread->joiner;
pthread->joiner = NULL;
if (joiner->kseg == pthread->kseg) {
/*
* We already own the scheduler lock for the joiner.
* Take advantage of that and make the joiner runnable.
*/
if (joiner->join_status.thread == pthread) {
/*
* Set the return value for the woken thread:
*/
joiner->join_status.error = ESRCH;
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
/* We are already in a critical region. */
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
if ((pthread->flags & THR_FLAGS_GC_SAFE) != 0) {
THR_LIST_REMOVE(pthread);
THR_GCLIST_ADD(pthread);
atomic_store_rel_int(&_gc_check, 1);
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
_thr_setrunnable_unlocked(joiner);
}
joiner = NULL;
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
dead = (pthread->flags & THR_FLAGS_GC_SAFE) != 0;
THR_SCHED_UNLOCK(curthread, pthread);
if (dead != 0) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
THR_GCLIST_ADD(pthread);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
_thr_ref_delete(curthread, pthread);
/* See if there is a thread waiting in pthread_join(): */
if (joiner != NULL) {
/* Lock the joiner before fiddling with it. */

View File

@ -85,15 +85,6 @@ _thr_ref_delete(struct pthread *curthread, struct pthread *thread)
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
thread->refcount--;
curthread->critical_count--;
if (((thread->flags & THR_FLAGS_GC_SAFE) != 0) &&
(thread->refcount == 0) &&
((thread->attr.flags & PTHREAD_DETACHED) != 0)) {
THR_LIST_REMOVE(thread);
THR_GCLIST_ADD(thread);
_gc_check = 1;
if (KSE_WAITING(_kse_initial))
KSE_WAKEUP(_kse_initial);
}
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@FreeBSD.org>
* Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -280,7 +280,7 @@ _libpthread_init(struct pthread *curthread)
*/
_thr_initial = curthread;
}
_kse_initial->k_kseg->kg_threadcount = 1;
_kse_initial->k_kseg->kg_threadcount = 0;
_thr_initial->kse = _kse_initial;
_thr_initial->kseg = _kse_initial->k_kseg;
_thr_initial->active = 1;
@ -290,7 +290,7 @@ _libpthread_init(struct pthread *curthread)
* queue.
*/
THR_LIST_ADD(_thr_initial);
TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_threadq, _thr_initial, kle);
KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial);
/* Setup the KSE/thread specific data for the current KSE/thread. */
if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0)

View File

@ -40,8 +40,9 @@ __weak_reference(_pthread_join, pthread_join);
int
_pthread_join(pthread_t pthread, void **thread_return)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
struct pthread *curthread = _get_curthread();
kse_critical_t crit;
int ret = 0;
_thr_enter_cancellation_point(curthread);
@ -83,8 +84,24 @@ _pthread_join(pthread_t pthread, void **thread_return)
/* Return the thread's return value: */
*thread_return = pthread->ret;
/* Unlock the thread and remove the reference. */
/* Detach the thread. */
pthread->attr.flags |= PTHREAD_DETACHED;
/* Unlock the thread. */
THR_SCHED_UNLOCK(curthread, pthread);
/*
* Remove the thread from the list of active
* threads and add it to the GC list.
*/
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
THR_LIST_REMOVE(pthread);
THR_GCLIST_ADD(pthread);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
/* Remove the reference. */
_thr_ref_delete(curthread, pthread);
}
else if (pthread->joiner != NULL) {

View File

@ -83,16 +83,6 @@ __FBSDID("$FreeBSD$");
#define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED
/*
* Add/remove threads from a KSE's scheduling queue.
* For now the scheduling queue is hung off the KSEG.
*/
#define KSEG_THRQ_ADD(kseg, thr) \
TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle)
#define KSEG_THRQ_REMOVE(kseg, thr) \
TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle)
/*
* Macros for manipulating the run queues. The priority queue
* routines use the thread's pqe link and also handle the setting
@ -116,6 +106,7 @@ static TAILQ_HEAD(, kse) active_kseq;
static TAILQ_HEAD(, kse) free_kseq;
static TAILQ_HEAD(, kse_group) free_kse_groupq;
static TAILQ_HEAD(, kse_group) active_kse_groupq;
static TAILQ_HEAD(, kse_group) gc_ksegq;
static struct lock kse_lock; /* also used for kseg queue */
static int free_kse_count = 0;
static int free_kseg_count = 0;
@ -135,13 +126,15 @@ static void kse_sched_multi(struct kse *curkse);
static void kse_sched_single(struct kse *curkse);
static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
static void kse_wait(struct kse *kse);
static void kse_free_unlocked(struct kse *kse);
static void kseg_free(struct kse_group *kseg);
static void kseg_init(struct kse_group *kseg);
static void kse_waitq_insert(struct pthread *thread);
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
static void thr_gc(struct kse *curkse);
#ifdef NOT_YET
static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
ucontext_t *ucp);
#endif
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
@ -232,6 +225,7 @@ _kse_single_thread(struct pthread *curthread)
while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
}
free_kseg_count = 0;
@ -242,6 +236,7 @@ _kse_single_thread(struct pthread *curthread)
kseg_next = TAILQ_NEXT(kseg, kg_qe);
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
}
active_kseg_count = 0;
@ -261,9 +256,11 @@ _kse_single_thread(struct pthread *curthread)
/* Free the to-be-gc'd threads. */
while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
TAILQ_REMOVE(&_thread_gc_list, thread, tle);
TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
free(thread);
}
TAILQ_INIT(&gc_ksegq);
_gc_count = 0;
if (inited != 0) {
/*
@ -309,6 +306,7 @@ _kse_init(void)
TAILQ_INIT(&free_kseq);
TAILQ_INIT(&free_kse_groupq);
TAILQ_INIT(&free_threadq);
TAILQ_INIT(&gc_ksegq);
if (_lock_init(&kse_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Unable to initialize free KSE queue lock");
@ -320,6 +318,7 @@ _kse_init(void)
PANIC("Unable to initialize thread list lock");
active_kse_count = 0;
active_kseg_count = 0;
_gc_count = 0;
inited = 1;
}
}
@ -766,10 +765,6 @@ kse_sched_multi(struct kse *curkse)
/* This has to be done without the scheduling lock held. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
kse_check_signals(curkse);
/* Check for GC: */
if (_gc_check != 0)
thr_gc(curkse);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
dump_queues(curkse);
@ -785,8 +780,6 @@ kse_sched_multi(struct kse *curkse)
kse_check_waitq(curkse);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
kse_check_signals(curkse);
if (_gc_check != 0)
thr_gc(curkse);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
@ -853,7 +846,7 @@ kse_sched_multi(struct kse *curkse)
* signals or needs a cancellation check, we need to add a
* signal frame to the thread's context.
*/
#if 0
#ifdef NOT_YET
if ((curframe == NULL) && ((curthread->check_pending != 0) ||
(((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) {
@ -904,6 +897,7 @@ kse_check_signals(struct kse *curkse)
}
}
#ifdef NOT_YET
static void
thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
{
@ -911,6 +905,7 @@ thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
thr_resume_check(curthread, ucp, NULL);
}
#endif
static void
thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
@ -944,7 +939,6 @@ static void
thr_cleanup(struct kse *curkse, struct pthread *thread)
{
struct pthread *joiner;
int free_thread = 0;
if ((joiner = thread->joiner) != NULL) {
thread->joiner = NULL;
@ -969,71 +963,81 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
thread->attr.flags |= PTHREAD_DETACHED;
}
thread->flags |= THR_FLAGS_GC_SAFE;
thread->kseg->kg_threadcount--;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
_thr_stack_free(&thread->attr);
if ((thread->attr.flags & PTHREAD_DETACHED) != 0) {
/* Remove this thread from the list of all threads: */
THR_LIST_REMOVE(thread);
if (thread->refcount == 0) {
THR_GCLIST_REMOVE(thread);
TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle);
free_thread = 1;
}
if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
/*
* Remove the thread from the KSEG's list of threads.
*/
KSEG_THRQ_REMOVE(thread->kseg, thread);
/*
* Migrate the thread to the main KSE so that this
* KSE and KSEG can be cleaned when their last thread
* exits.
*/
thread->kseg = _kse_initial->k_kseg;
thread->kse = _kse_initial;
}
thread->flags |= THR_FLAGS_GC_SAFE;
/*
* We can't hold the thread list lock while holding the
* scheduler lock.
*/
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
DBG_MSG("Adding thread %p to GC list\n", thread);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_GCLIST_ADD(thread);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (free_thread != 0)
_thr_free(curkse, thread);
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}
void
thr_gc(struct pthread *curthread)
_thr_gc(struct pthread *curthread)
{
struct pthread *td, *joiner;
struct kse_group *free_kseg;
struct pthread *td, *td_next;
kse_critical_t crit;
int clean;
_gc_check = 0;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
/* Check the threads waiting for GC. */
for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
td_next = TAILQ_NEXT(td, gcle);
if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
continue;
#ifdef NOT_YET
else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
(td->kse->k_mbx.km_flags == 0)) {
/*
* The thread and KSE are operating on the same
* stack. Wait for the KSE to exit before freeing
* the thread's stack as well as everything else.
*/
continue;
}
#endif
THR_GCLIST_REMOVE(td);
clean = (td->attr.flags & PTHREAD_DETACHED) != 0;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
clean = ((td->attr.flags & PTHREAD_DETACHED) != 0) &&
(td->refcount == 0);
_thr_stack_free(&td->attr);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
DBG_MSG("Found thread %p in GC list, clean? %d\n", td, clean);
KSE_SCHED_LOCK(curkse, td->kseg);
TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle);
if (TAILQ_EMPTY(&td->kseg->kg_threadq))
free_kseg = td->kseg;
else
free_kseg = NULL;
joiner = NULL;
if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) &&
(td->joiner->join_status.thread == td)) {
joiner = td->joiner;
joiner->join_status.thread = NULL;
/* Set the return status for the joining thread: */
joiner->join_status.ret = td->ret;
/* Make the thread runnable. */
if (td->kseg == joiner->kseg) {
_thr_setrunnable_unlocked(joiner);
joiner = NULL;
}
if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
kse_free_unlocked(td->kse);
kseg_free(td->kseg);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
}
td->joiner = NULL;
KSE_SCHED_UNLOCK(curkse, td->kseg);
if (free_kseg != NULL)
kseg_free(free_kseg);
if (joiner != NULL) {
KSE_SCHED_LOCK(curkse, joiner->kseg);
_thr_setrunnable_unlocked(joiner);
KSE_SCHED_UNLOCK(curkse, joiner->kseg);
if (clean != 0) {
_kse_critical_leave(crit);
_thr_free(curthread, td);
crit = _kse_critical_enter();
}
_thr_free(curkse, td);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -1402,11 +1406,33 @@ static void
kse_fini(struct kse *kse)
{
struct timespec ts;
struct kse_group *free_kseg = NULL;
if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
kse_exit();
/*
* Check to see if this is the main kse.
* Check to see if this is one of the main kses.
*/
if (kse == _kse_initial) {
else if (kse->k_kseg != _kse_initial->k_kseg) {
/* Remove this KSE from the KSEG's list of KSEs. */
KSE_SCHED_LOCK(kse, kse->k_kseg);
TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
free_kseg = kse->k_kseg;
KSE_SCHED_UNLOCK(kse, kse->k_kseg);
/*
* Add this KSE to the list of free KSEs along with
* the KSEG if it is now orphaned.
*/
KSE_LOCK_ACQUIRE(kse, &kse_lock);
if (free_kseg != NULL)
kseg_free(free_kseg);
kse_free_unlocked(kse);
KSE_LOCK_RELEASE(kse, &kse_lock);
kse_exit();
/* Never returns. */
} else {
/*
* Wait for the last KSE/thread to exit, or for more
* threads to be created (it is possible for additional
@ -1435,12 +1461,6 @@ kse_fini(struct kse *kse)
__isthreaded = 0;
exit(0);
}
} else {
/* Mark this KSE for GC: */
KSE_LOCK_ACQUIRE(kse, &_thread_list_lock);
TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe);
KSE_LOCK_RELEASE(kse, &_thread_list_lock);
kse_exit();
}
}
@ -1580,25 +1600,28 @@ _set_curkse(struct kse *kse)
/*
* Allocate a new KSEG.
*
* We allow the current KSE (curkse) to be NULL in the case that this
* We allow the current thread to be NULL in the case that this
* is the first time a KSEG is being created (library initialization).
* In this case, we don't need to (and can't) take any locks.
*/
struct kse_group *
_kseg_alloc(struct kse *curkse)
_kseg_alloc(struct pthread *curthread)
{
struct kse_group *kseg = NULL;
kse_critical_t crit;
if ((curkse != NULL) && (free_kseg_count > 0)) {
if ((curthread != NULL) && (free_kseg_count > 0)) {
/* Use the kse lock for the kseg queue. */
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
free_kseg_count--;
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
}
KSE_LOCK_RELEASE(curkse, &kse_lock);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
/*
@ -1608,15 +1631,27 @@ _kseg_alloc(struct kse *curkse)
*/
if ((kseg == NULL) &&
((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) {
THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq,
THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0,
"Unable to allocate priority queue.");
kseg_init(kseg);
if (curkse != NULL)
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
kseg_free(kseg);
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (_pq_alloc(&kseg->kg_schedq.sq_runq,
THR_MIN_PRIORITY, THR_LAST_PRIORITY) != 0) {
free(kseg);
kseg = NULL;
} else {
kseg_init(kseg);
/* Add the KSEG to the list of active KSEGs. */
if (curthread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq,
kseg, kg_qe);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
} else {
active_kseg_count++;
TAILQ_INSERT_TAIL(&active_kse_groupq,
kseg, kg_qe);
}
}
}
return (kseg);
}
@ -1628,6 +1663,7 @@ _kseg_alloc(struct kse *curkse)
static void
kseg_free(struct kse_group *kseg)
{
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
kseg_init(kseg);
free_kseg_count++;
@ -1637,19 +1673,21 @@ kseg_free(struct kse_group *kseg)
/*
* Allocate a new KSE.
*
* We allow the current KSE (curkse) to be NULL in the case that this
* We allow the current thread to be NULL in the case that this
* is the first time a KSE is being created (library initialization).
* In this case, we don't need to (and can't) take any locks.
*/
struct kse *
_kse_alloc(struct kse *curkse)
_kse_alloc(struct pthread *curthread)
{
struct kse *kse = NULL;
kse_critical_t crit;
int need_ksd = 0;
int i;
if ((curkse != NULL) && (free_kse_count > 0)) {
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
if ((curthread != NULL) && (free_kse_count > 0)) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
/* Search for a finished KSE. */
kse = TAILQ_FIRST(&free_kseq);
#define KEMBX_DONE 0x01
@ -1664,7 +1702,8 @@ _kse_alloc(struct kse *curkse)
active_kse_count++;
TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
}
KSE_LOCK_RELEASE(curkse, &kse_lock);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
if ((kse == NULL) &&
((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
@ -1700,12 +1739,16 @@ _kse_alloc(struct kse *curkse)
}
if ((kse != NULL) && (need_ksd != 0)) {
/* This KSE needs initialization. */
if (curkse != NULL)
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
if (curthread != NULL) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
}
/* Initialize KSD inside of the lock. */
if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (curthread != NULL) {
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
free(kse->k_mbx.km_stack.ss_sp);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
@ -1716,36 +1759,38 @@ _kse_alloc(struct kse *curkse)
kse->k_flags = 0;
active_kse_count++;
TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
if (curkse != NULL)
KSE_LOCK_RELEASE(curkse, &kse_lock);
if (curthread != NULL) {
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
}
return (kse);
}
void
_kse_free(struct kse *curkse, struct kse *kse)
kse_free_unlocked(struct kse *kse)
{
struct kse_group *kseg = NULL;
if (curkse == kse)
PANIC("KSE trying to free itself");
KSE_LOCK_ACQUIRE(curkse, &kse_lock);
active_kse_count--;
if ((kseg = kse->k_kseg) != NULL) {
TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe);
/*
* Free the KSEG if there are no more threads associated
* with it.
*/
if (TAILQ_EMPTY(&kseg->kg_threadq))
kseg_free(kseg);
}
kse->k_kseg = NULL;
kse->k_flags &= ~KF_INITIALIZED;
TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
free_kse_count++;
KSE_LOCK_RELEASE(curkse, &kse_lock);
}
void
_kse_free(struct pthread *curthread, struct kse *kse)
{
kse_critical_t crit;
if (curthread == NULL)
kse_free_unlocked(kse);
else {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
kse_free_unlocked(kse);
KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
_kse_critical_leave(crit);
}
}
static void
@ -1754,7 +1799,6 @@ kseg_init(struct kse_group *kseg)
TAILQ_INIT(&kseg->kg_kseq);
TAILQ_INIT(&kseg->kg_threadq);
TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
TAILQ_INIT(&kseg->kg_schedq.sq_blockedq);
_lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
_kse_lock_wakeup);
kseg->kg_threadcount = 0;
@ -1769,16 +1813,16 @@ _thr_alloc(struct pthread *curthread)
struct pthread *thread = NULL;
if (curthread != NULL) {
if (_gc_check != 0)
thread_gc(curthread);
if (GC_NEEDED())
_thr_gc(curthread);
if (free_thread_count > 0) {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &thread_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
TAILQ_REMOVE(&free_threadq, thread, tle);
free_thread_count--;
}
KSE_LOCK_RELEASE(curkse, &thread_lock);
KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
}
}
if (thread == NULL)
@@ -1791,14 +1835,16 @@ _thr_free(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
DBG_MSG("Freeing thread %p\n", thread);
if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS))
free(thread);
else {
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curkse, &thread_lock);
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
THR_LIST_REMOVE(thread);
TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
free_thread_count++;
KSE_LOCK_RELEASE(curkse, &thread_lock);
KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
_kse_critical_leave(crit);
}
}

View File

@@ -101,6 +101,13 @@ _pq_alloc(pq_queue_t *pq, int minprio, int maxprio)
return (ret);
}
void
_pq_free(pq_queue_t *pq)
{
if ((pq != NULL) && (pq->pq_lists != NULL))
free(pq->pq_lists);
}
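Note that _pq_free() releases only the per-priority list array; the pq_queue_t itself belongs to the caller. A usage sketch pairing it with _pq_alloc(); the THR_MIN_PRIORITY/THR_MAX_PRIORITY names are an assumption, not taken from this diff:

static int
run_queue_demo(void)
{
	pq_queue_t pq;

	if (_pq_alloc(&pq, THR_MIN_PRIORITY, THR_MAX_PRIORITY) != 0)
		return (-1);
	/* ... insert and remove threads with _pq_insert_head() etc. ... */
	_pq_free(&pq);		/* frees pq.pq_lists only */
	return (0);
}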
int
_pq_init(pq_queue_t *pq)
{

View File

@@ -153,7 +153,6 @@ typedef struct pq_queue {
struct sched_queue {
pq_queue_t sq_runq;
TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
TAILQ_HEAD(, pthread) sq_blockedq; /* waiting in kernel */
};
/* Used to maintain pending and active signals: */
@@ -180,7 +179,8 @@ struct kse {
struct kse_group *k_kseg; /* parent KSEG */
struct sched_queue *k_schedq; /* scheduling queue */
/* -- end of location and order specific items -- */
TAILQ_ENTRY(kse) k_qe; /* link entry */
TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */
TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */
struct ksd k_ksd; /* KSE specific data */
/*
* Items that are only modified by the kse, or that otherwise
@@ -220,6 +220,23 @@ struct kse_group {
#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
};
/*
* Add/remove threads from a KSE's scheduling queue.
* For now the scheduling queue is hung off the KSEG.
*/
#define KSEG_THRQ_ADD(kseg, thr) \
do { \
TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
(kseg)->kg_threadcount++; \
} while (0)
#define KSEG_THRQ_REMOVE(kseg, thr) \
do { \
TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \
(kseg)->kg_threadcount--; \
} while (0)
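Both macros update the list linkage and kg_threadcount together inside a do { } while (0) body, so each expands to a single statement that is safe after an unbraced if. A sketch of the intended call pattern (the function is hypothetical; callers are presumed to hold the KSEG's scheduling lock):

static void
kseg_thread_lifecycle(struct kse_group *kseg, struct pthread *thr)
{
	KSEG_THRQ_ADD(kseg, thr);	/* link via kle, kg_threadcount++ */
	/* ... thread runs under this KSEG ... */
	KSEG_THRQ_REMOVE(kseg, thr);	/* unlink, kg_threadcount-- */
}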
/*
* Lock acquire and release for KSEs.
*/
@@ -860,17 +877,21 @@ do { \
} while (0)
#define THR_GCLIST_ADD(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \
TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, tle); \
TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
(thrd)->flags |= THR_FLAGS_IN_GCLIST; \
_gc_count++; \
} \
} while (0)
#define THR_GCLIST_REMOVE(thrd) do { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \
TAILQ_REMOVE(&_thread_gc_list, thrd, tle); \
TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
(thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \
_gc_count--; \
} \
} while (0)
#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5)
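GC_NEEDED() replaces the old _gc_check flag with a threshold on _gc_count, which the two macros above maintain. A sketch of the implied trigger path; thread_retire() is a hypothetical stand-in for wherever a dead thread gets queued:

static void
thread_retire(struct pthread *curthread, struct pthread *dead)
{
	THR_GCLIST_ADD(dead);		/* links via gcle, _gc_count++ */

	if (GC_NEEDED())		/* five or more queued threads */
		_thr_gc(curthread);	/* reaps _thread_gc_list */
}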
/*
* Locking the scheduling queue for another thread uses that thread's
* KSEG lock.
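The macro bodies are outside this hunk; a rough sketch of what the comment implies is that the lock indirects through the target thread's group, not the caller's. KSE_SCHED_LOCK and the thread's kseg member are assumptions here, not taken from this diff:

/* Assumed shape only, not the actual thr_private.h macros. */
#define THR_SCHED_LOCK(curthr, thr)	\
	KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg)
#define THR_SCHED_UNLOCK(curthr, thr)	\
	KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg)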
@@ -965,7 +986,7 @@ SCLASS pid_t _thr_pid SCLASS_PRESET(0);
/* Garbage collector lock. */
SCLASS struct lock _gc_lock;
SCLASS int _gc_check SCLASS_PRESET(0);
SCLASS pthread_t _gc_thread;
SCLASS int _gc_count SCLASS_PRESET(0);
SCLASS struct lock _mutex_static_lock;
SCLASS struct lock _rwlock_static_lock;
@@ -990,12 +1011,12 @@ void _cond_wait_backout(struct pthread *);
struct pthread *_get_curthread(void);
struct kse *_get_curkse(void);
void _set_curkse(struct kse *);
struct kse *_kse_alloc(struct kse *);
struct kse *_kse_alloc(struct pthread *);
kse_critical_t _kse_critical_enter(void);
void _kse_critical_leave(kse_critical_t);
void _kse_free(struct kse *, struct kse *);
void _kse_free(struct pthread *, struct kse *);
void _kse_init(void);
struct kse_group *_kseg_alloc(struct kse *);
struct kse_group *_kseg_alloc(struct pthread *);
void _kse_lock_wait(struct lock *, struct lockuser *lu);
void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
void _kse_sig_check_pending(struct kse *);
@@ -1011,6 +1032,7 @@ int _mutex_reinit(struct pthread_mutex *);
void _mutex_unlock_private(struct pthread *);
void _libpthread_init(struct pthread *);
int _pq_alloc(struct pq_queue *, int, int);
void _pq_free(struct pq_queue *);
int _pq_init(struct pq_queue *);
void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
@@ -1030,7 +1052,9 @@ int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
struct pthread *_pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
struct pthread *_thr_alloc(struct kse *);
struct pthread *_thr_alloc(struct pthread *);
int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *);
int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
void _thr_exit(char *, int, char *);
void _thr_exit_cleanup(void);
void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
@@ -1046,7 +1070,8 @@ void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
int _thr_stack_alloc(struct pthread_attr *);
void _thr_stack_free(struct pthread_attr *);
void _thr_exit_cleanup(void);
void _thr_free(struct kse *, struct pthread *);
void _thr_free(struct pthread *, struct pthread *);
void _thr_gc(struct pthread *);
void _thr_panic_exit(char *, int, char *);
void _thread_cleanupspecific(void);
void _thread_dump_info(void);

View File

@@ -55,7 +55,10 @@ _pthread_resume_np(pthread_t thread)
/* Lock the thread's scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
resume_common(thread);
if ((thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0))
resume_common(thread);
/* Unlock the thread's scheduling queue: */
THR_SCHED_UNLOCK(curthread, thread);
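For reference, a small runnable consumer of the suspend/resume pair this file implements, written against FreeBSD's non-portable <pthread_np.h> interface; the worker loop is just filler:

#include <pthread.h>
#include <pthread_np.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	for (;;)
		sleep(1);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_suspend_np(t);	/* parks t in PS_SUSPENDED */
	pthread_resume_np(t);	/* runnable again, unless t is
				 * dead, deadlocked, or exiting */
	return (0);
}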

View File

@@ -64,6 +64,13 @@ _pthread_setschedparam(pthread_t pthread, int policy,
* its priority:
*/
THR_SCHED_LOCK(curthread, pthread);
if ((pthread->state == PS_DEAD) ||
(pthread->state == PS_DEADLOCK) ||
((pthread->flags & THR_FLAGS_EXITING) != 0)) {
THR_SCHED_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
return (ESRCH);
}
in_syncq = pthread->flags & THR_FLAGS_IN_SYNCQ;
/* Set the scheduling policy: */
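With this guard, a dead or exiting target now surfaces to callers as ESRCH instead of being silently re-prioritized. A hedged caller-side sketch; boost_thread() and the priority value are made up:

#include <errno.h>
#include <pthread.h>
#include <sched.h>

static int
boost_thread(pthread_t t)
{
	struct sched_param param;
	int ret;

	param.sched_priority = 20;	/* arbitrary example value */
	ret = pthread_setschedparam(t, SCHED_RR, &param);
	if (ret == ESRCH)
		return (0);	/* target already gone; nothing to do */
	return (ret);
}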

View File

@@ -56,9 +56,7 @@ _pthread_suspend_np(pthread_t thread)
== 0) {
/* Lock the thread's scheduling queue: */
THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
/* Unlock the thread's scheduling queue: */
THR_SCHED_UNLOCK(curthread, thread);
@@ -80,10 +78,7 @@ _pthread_suspend_all_np(void)
KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if ((thread != curthread) &&
(thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0)) {
if (thread != curthread) {
THR_SCHED_LOCK(curthread, thread);
suspend_common(thread);
THR_SCHED_UNLOCK(curthread, thread);
@@ -98,9 +93,13 @@ _pthread_suspend_all_np(void)
void
suspend_common(struct pthread *thread)
{
thread->flags |= THR_FLAGS_SUSPENDED;
if (thread->flags & THR_FLAGS_IN_RUNQ) {
THR_RUNQ_REMOVE(thread);
THR_SET_STATE(thread, PS_SUSPENDED);
if ((thread->state != PS_DEAD) &&
(thread->state != PS_DEADLOCK) &&
((thread->flags & THR_FLAGS_EXITING) == 0)) {
thread->flags |= THR_FLAGS_SUSPENDED;
if ((thread->flags & THR_FLAGS_IN_RUNQ) != 0) {
THR_RUNQ_REMOVE(thread);
THR_SET_STATE(thread, PS_SUSPENDED);
}
}
}
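With the state checks moved inside suspend_common(), a blanket suspend no longer has to filter exiting threads at each call site. A short usage sketch of the _np pair built on top of it; checkpoint() is a hypothetical consumer:

#include <pthread_np.h>

static void
checkpoint(void)
{
	pthread_suspend_all_np();
	/* ... inspect or snapshot the other threads ... */
	pthread_resume_all_np();
}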