Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-15 10:17:20 +00:00)
Use a generic way to back threads out of wait queues when handling
signals instead of having more intricate knowledge of thread state
within signal handling.

Simplify signal code because of above (by David Xu).

Use macros for libpthread usage of pthread_cleanup_push() and
pthread_cleanup_pop(). This removes some instances of malloc()
and free() from the semaphore and pthread_once() implementations.

When single threaded and forking(), make sure that the current
thread's signal mask is inherited by the forked thread.

Use private mutexes for libc and libpthread. Signals are deferred
while threads hold private mutexes. This fix also breaks
www/linuxpluginwrapper; a patch that fixes it is at
http://people.freebsd.org/~deischen/kse/linuxpluginwrapper.diff

Fix race condition in condition variables where handling a signal
(pthread_kill() or kill()) may not see a wakeup (pthread_cond_signal()
or pthread_cond_broadcast()).

In collaboration with: davidxu
This commit is contained in:
parent 885dabe5f7
commit 843d4004b3

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
svn path=/head/; revision=139023
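The cleanup-handler change in this commit replaces malloc()'d cleanup records
with records kept on the caller's stack, via the THR_CLEANUP_PUSH/THR_CLEANUP_POP
macros added to the header in the diff below. A minimal, self-contained sketch of
that pattern follows; the identifiers here (struct cleanup, cleanup_stack,
CLEANUP_PUSH/CLEANUP_POP) are illustrative assumptions, not the library's own:

#include <stdio.h>

/*
 * Sketch only: because push and pop form one lexical block, the cleanup
 * record can live on the stack, so no malloc()/free() is needed.
 */
struct cleanup {
        struct cleanup *next;
        void (*routine)(void *);
        void *arg;
};

static struct cleanup *cleanup_stack;   /* per-thread in the real library */

#define CLEANUP_PUSH(func, a) {                                 \
        struct cleanup __cup;                                   \
        __cup.routine = (func);                                 \
        __cup.arg = (a);                                        \
        __cup.next = cleanup_stack;                             \
        cleanup_stack = &__cup;

#define CLEANUP_POP(exec)                                       \
        cleanup_stack = __cup.next;                             \
        if ((exec) != 0)                                        \
                __cup.routine(__cup.arg);                       \
}

static void
release(void *arg)
{
        printf("cleanup: %s\n", (const char *)arg);
}

int
main(void)
{
        CLEANUP_PUSH(release, "resource");
        /* ... code the handler protects ... */
        CLEANUP_POP(1);         /* run the handler on the way out */
        return (0);
}

The trade-off is that the push and pop must stay in the same function, which is
exactly how the library uses them in the semaphore and pthread_once() hunks below.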
@@ -37,6 +37,7 @@
#include "libc_private.h"
#include "thr_private.h"

#undef errno
extern int errno;

int *
@@ -14,18 +14,26 @@ __weak_reference(_pthread_testcancel, pthread_testcancel);
static inline int
checkcancel(struct pthread *curthread)
{
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
((curthread->cancelflags & THR_CANCELLING) != 0)) {
if ((curthread->cancelflags & THR_CANCELLING) != 0) {
/*
* It is possible for this thread to be swapped out
* while performing cancellation; do not allow it
* to be cancelled again.
*/
curthread->cancelflags &= ~THR_CANCELLING;
return (1);
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
/*
* This may happen once, but after this, it
* shouldn't happen again.
*/
curthread->cancelflags &= ~THR_CANCELLING;
return (0);
}
if ((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) {
curthread->cancelflags &= ~THR_CANCELLING;
return (1);
}
}
else
return (0);
return (0);
}

static inline void
@@ -50,6 +50,7 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->onstack = 0;
new->next = curthread->cleanup;

curthread->cleanup = new;
@@ -67,6 +68,7 @@ _pthread_cleanup_pop(int execute)
if (execute) {
old->routine(old->routine_arg);
}
free(old);
if (old->onstack == 0)
free(old);
}
}
@@ -84,6 +84,13 @@ _thr_setconcurrency(int new_level)
int i;
int ret;

/*
* Turn on threaded mode, if failed, it is unnecessary to
* do further work.
*/
if (_kse_isthreaded() == 0 && _kse_setthreaded(1))
return (EAGAIN);

ret = 0;
curthread = _get_curthread();
/* Race condition, but so what. */
@@ -47,6 +47,9 @@
static inline struct pthread *cond_queue_deq(pthread_cond_t);
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
static void cond_wait_backout(void *);
static inline void check_continuation(struct pthread *,
struct pthread_cond *, pthread_mutex_t *);

/*
* Double underscore versions are cancellation points. Single underscore
@@ -171,8 +174,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
struct pthread *curthread = _get_curthread();
int rval = 0;
int done = 0;
int interrupted = 0;
int unlock_mutex = 1;
int mutex_locked = 1;
int seqno;

if (cond == NULL)
@@ -198,10 +200,11 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
* and backed out of the waiting queue prior to executing the
* signal handler.
*/
do {
/* Lock the condition variable structure: */
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Lock the condition variable structure: */
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
seqno = (*cond)->c_seqno;
do {
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -217,9 +220,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
case COND_TYPE_FAST:
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
((*cond)->c_mutex != *mutex))) {
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return invalid argument error: */
rval = EINVAL;
} else {
@@ -233,15 +233,11 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
*/
cond_queue_enq(*cond, curthread);

/* Remember the mutex and sequence number: */
(*cond)->c_mutex = *mutex;
seqno = (*cond)->c_seqno;

/* Wait forever: */
curthread->wakeup_time.tv_sec = -1;

/* Unlock the mutex: */
if ((unlock_mutex != 0) &&
if (mutex_locked &&
((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex, so remove
@@ -249,22 +245,18 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
* variable queue:
*/
cond_queue_remove(*cond, curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
}
else {
/* Remember the mutex: */
(*cond)->c_mutex = *mutex;

/*
* Don't unlock the mutex the next
* time through the loop (if the
* thread has to be requeued after
* handling a signal).
*/
unlock_mutex = 0;
mutex_locked = 0;

/*
* This thread is active and is in a
@@ -277,6 +269,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)

/* Remember the CV: */
curthread->data.cond = *cond;
curthread->sigbackout = cond_wait_backout;
THR_SCHED_UNLOCK(curthread, curthread);

/* Unlock the CV structure: */
@@ -286,8 +279,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.cond = NULL;

/*
* XXX - This really isn't a good check
* since there can be more than one
@@ -299,41 +290,39 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
* should be sent "as soon as possible".
*/
done = (seqno != (*cond)->c_seqno);

if (THR_IN_SYNCQ(curthread)) {
if (done && !THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
* The thread is dequeued, so
* it is safe to clear these.
*/
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);
curthread->data.cond = NULL;
curthread->sigbackout = NULL;
check_continuation(curthread,
NULL, mutex);
return (_mutex_cv_lock(mutex));
}

/* Relock the CV structure: */
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

/*
* Clear these after taking the lock to
* prevent a race condition where a
* signal can arrive before dequeueing
* the thread.
*/
curthread->data.cond = NULL;
curthread->sigbackout = NULL;
done = (seqno != (*cond)->c_seqno);

if (THR_IN_CONDQ(curthread)) {
cond_queue_remove(*cond,
curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);
}

/*
* Save the interrupted flag; locking
* the mutex may destroy it.
*/
interrupted = curthread->interrupted;

/*
* Note that even though this thread may
* have been canceled, POSIX requires
* that the mutex be reaquired prior to
* cancellation.
*/
if (done || interrupted) {
rval = _mutex_cv_lock(mutex);
unlock_mutex = 1;
}
}
}
@@ -341,18 +330,21 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)

/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return an invalid argument error: */
rval = EINVAL;
break;
}

if ((interrupted != 0) && (curthread->continuation != NULL))
curthread->continuation((void *) curthread);
check_continuation(curthread, *cond,
mutex_locked ? NULL : mutex);
} while ((done == 0) && (rval == 0));

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

if (mutex_locked == 0)
_mutex_cv_lock(mutex);

/* Return the completion status: */
return (rval);
}
@@ -378,8 +370,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
struct pthread *curthread = _get_curthread();
int rval = 0;
int done = 0;
int interrupted = 0;
int unlock_mutex = 1;
int mutex_locked = 1;
int seqno;

THR_ASSERT(curthread->locklevel == 0,
@@ -407,10 +398,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* and backed out of the waiting queue prior to executing the
* signal handler.
*/
do {
/* Lock the condition variable structure: */
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);

/* Lock the condition variable structure: */
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
seqno = (*cond)->c_seqno;
do {
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
@@ -428,9 +420,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
((*cond)->c_mutex != *mutex))) {
/* Return invalid argument error: */
rval = EINVAL;

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Set the wakeup time: */
curthread->wakeup_time.tv_sec = abstime->tv_sec;
@@ -447,12 +436,8 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
*/
cond_queue_enq(*cond, curthread);

/* Remember the mutex and sequence number: */
(*cond)->c_mutex = *mutex;
seqno = (*cond)->c_seqno;

/* Unlock the mutex: */
if ((unlock_mutex != 0) &&
if (mutex_locked &&
((rval = _mutex_cv_unlock(mutex)) != 0)) {
/*
* Cannot unlock the mutex; remove the
@@ -460,21 +445,17 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* variable queue:
*/
cond_queue_remove(*cond, curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
} else {
/* Remember the mutex: */
(*cond)->c_mutex = *mutex;

/*
* Don't unlock the mutex the next
* time through the loop (if the
* thread has to be requeued after
* handling a signal).
*/
unlock_mutex = 0;
mutex_locked = 0;

/*
* This thread is active and is in a
@@ -487,6 +468,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,

/* Remember the CV: */
curthread->data.cond = *cond;
curthread->sigbackout = cond_wait_backout;
THR_SCHED_UNLOCK(curthread, curthread);

/* Unlock the CV structure: */
@@ -496,8 +478,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.cond = NULL;

/*
* XXX - This really isn't a good check
* since there can be more than one
@@ -509,38 +489,45 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
* should be sent "as soon as possible".
*/
done = (seqno != (*cond)->c_seqno);
if (done && !THR_IN_CONDQ(curthread)) {
/*
* The thread is dequeued, so
* it is safe to clear these.
*/
curthread->data.cond = NULL;
curthread->sigbackout = NULL;
check_continuation(curthread,
NULL, mutex);
return (_mutex_cv_lock(mutex));
}

/* Relock the CV structure: */
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

/*
* Clear these after taking the lock to
* prevent a race condition where a
* signal can arrive before dequeueing
* the thread.
*/
curthread->data.cond = NULL;
curthread->sigbackout = NULL;

done = (seqno != (*cond)->c_seqno);

if (THR_IN_CONDQ(curthread)) {
/*
* Lock the condition variable
* while removing the thread.
*/
THR_LOCK_ACQUIRE(curthread,
&(*cond)->c_lock);

cond_queue_remove(*cond,
curthread);

/* Check for no more waiters: */
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
(*cond)->c_mutex = NULL;

THR_LOCK_RELEASE(curthread,
&(*cond)->c_lock);
}

/*
* Save the interrupted flag; locking
* the mutex may destroy it.
*/
interrupted = curthread->interrupted;
if (curthread->timeout != 0) {
/* The wait timedout. */
rval = ETIMEDOUT;
(void)_mutex_cv_lock(mutex);
} else if (interrupted || done) {
rval = _mutex_cv_lock(mutex);
unlock_mutex = 1;
}
}
}
@@ -548,18 +535,21 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,

/* Trap invalid condition variable types: */
default:
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

/* Return an invalid argument error: */
rval = EINVAL;
break;
}

if ((interrupted != 0) && (curthread->continuation != NULL))
curthread->continuation((void *)curthread);
check_continuation(curthread, *cond,
mutex_locked ? NULL : mutex);
} while ((done == 0) && (rval == 0));

/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);

if (mutex_locked == 0)
_mutex_cv_lock(mutex);

/* Return the completion status: */
return (rval);
}
@@ -615,6 +605,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
pthread->sigbackout = NULL;
if ((pthread->kseg == curthread->kseg) &&
(pthread->active_priority >
curthread->active_priority))
@@ -681,6 +672,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
!= NULL) {
THR_SCHED_LOCK(curthread, pthread);
cond_queue_remove(*cond, pthread);
pthread->sigbackout = NULL;
if ((pthread->kseg == curthread->kseg) &&
(pthread->active_priority >
curthread->active_priority))
@@ -712,9 +704,31 @@ _pthread_cond_broadcast(pthread_cond_t * cond)

__strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);

void
_cond_wait_backout(struct pthread *curthread)
static inline void
check_continuation(struct pthread *curthread, struct pthread_cond *cond,
pthread_mutex_t *mutex)
{
if ((curthread->interrupted != 0) &&
(curthread->continuation != NULL)) {
if (cond != NULL)
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &cond->c_lock);
/*
* Note that even though this thread may have been
* canceled, POSIX requires that the mutex be
* reaquired prior to cancellation.
*/
if (mutex != NULL)
_mutex_cv_lock(mutex);
curthread->continuation((void *) curthread);
PANIC("continuation returned in pthread_cond_wait.\n");
}
}

static void
cond_wait_backout(void *arg)
{
struct pthread *curthread = (struct pthread *)arg;
pthread_cond_t cond;

cond = curthread->data.cond;
@@ -740,6 +754,8 @@ _cond_wait_backout(struct pthread *curthread)
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &cond->c_lock);
}
/* No need to call this again. */
curthread->sigbackout = NULL;
}

/*
@@ -171,9 +171,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* No thread is wanting to join to this one: */
new_thread->joiner = NULL;

/* Initialize the signal frame: */
new_thread->curframe = NULL;

/*
* Initialize the machine context.
* Enter a critical region to get consistent context.
@@ -235,6 +232,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->tlflags = 0;
new_thread->sigbackout = NULL;
new_thread->continuation = NULL;
new_thread->wakeup_time.tv_sec = -1;
new_thread->lock_switch = 0;
@@ -43,12 +43,6 @@
#include "libc_private.h"
#include "thr_private.h"

/*
* For a while, allow libpthread to work with a libc that doesn't
* export the malloc lock.
*/
#pragma weak __malloc_lock

__weak_reference(_fork, fork);

pid_t
@@ -60,11 +54,21 @@ _fork(void)
pid_t ret;
int errsave;

if (!_kse_isthreaded())
return (__sys_fork());

curthread = _get_curthread();

if (!_kse_isthreaded()) {
SIGFILLSET(sigset);
__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
ret = __sys_fork();
if (ret == 0)
/* Child */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
NULL);
else
__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
return (ret);
}

/*
* Masks all signals until we reach a safe point in
* _kse_single_thread, and the signal masks will be
@@ -86,7 +90,7 @@ _fork(void)
}

/* Fork a new process: */
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
if (_kse_isthreaded() != 0) {
_spinlock(__malloc_lock);
}
if ((ret = __sys_fork()) == 0) {
@@ -391,6 +391,7 @@ init_main_thread(struct pthread *thread)
thread->specific = NULL;
thread->cleanup = NULL;
thread->flags = 0;
thread->sigbackout = NULL;
thread->continuation = NULL;

thread->state = PS_RUNNING;
@@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
#include "thr_private.h"
#include "libc_private.h"

/*#define DEBUG_THREAD_KERN */
/* #define DEBUG_THREAD_KERN */
#ifdef DEBUG_THREAD_KERN
#define DBG_MSG stdout_debug
#else
@@ -165,8 +165,7 @@ static struct kse_mailbox *kse_wakeup_one(struct pthread *thread);
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
static void thr_link(struct pthread *thread);
static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *);
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf);
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
static void thr_unlink(struct pthread *thread);
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
@@ -352,6 +351,9 @@ _kse_single_thread(struct pthread *curthread)
curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL;
curthread->attr.flags |= PTHREAD_SCOPE_SYSTEM;

/* After a fork(), there child should have no pending signals. */
sigemptyset(&curthread->sigpend);

/*
* Restore signal mask early, so any memory problems could
* dump core.
@@ -615,13 +617,12 @@ _thr_sched_switch(struct pthread *curthread)
void
_thr_sched_switch_unlocked(struct pthread *curthread)
{
struct pthread_sigframe psf;
struct kse *curkse;
volatile int resume_once = 0;
ucontext_t *uc;

/* We're in the scheduler, 5 by 5: */
curkse = _get_curkse();
curkse = curthread->kse;

curthread->need_switchout = 1; /* The thread yielded on its own. */
curthread->critical_yield = 0; /* No need to yield anymore. */
@@ -629,14 +630,6 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
/* Thread can unlock the scheduler lock. */
curthread->lock_switch = 1;

/*
* The signal frame is allocated off the stack because
* a thread can be interrupted by other signals while
* it is running down pending signals.
*/
psf.psf_valid = 0;
curthread->curframe = &psf;

if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
kse_sched_single(&curkse->k_kcb->kcb_kmbx);
else {
@@ -657,19 +650,12 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
_thread_enter_uts(curthread->tcb, curkse->k_kcb);
}

/*
* It is ugly we must increase critical count, because we
* have a frame saved, we must backout state in psf
* before we can process signals.
*/
curthread->critical_count += psf.psf_valid;

/*
* Unlock the scheduling queue and leave the
* critical region.
*/
/* Don't trust this after a switch! */
curkse = _get_curkse();
curkse = curthread->kse;

curthread->lock_switch = 0;
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
@@ -678,16 +664,14 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
/*
* This thread is being resumed; check for cancellations.
*/
if ((psf.psf_valid ||
((curthread->check_pending || THR_NEED_ASYNC_CANCEL(curthread))
&& !THR_IN_CRITICAL(curthread)))) {
if (THR_NEED_ASYNC_CANCEL(curthread) && !THR_IN_CRITICAL(curthread)) {
uc = alloca(sizeof(ucontext_t));
resume_once = 0;
THR_GETCONTEXT(uc);
if (resume_once == 0) {
resume_once = 1;
curthread->check_pending = 0;
thr_resume_check(curthread, uc, &psf);
thr_resume_check(curthread, uc);
}
}
THR_ACTIVATE_LAST_LOCK(curthread);
@@ -888,9 +872,6 @@ kse_sched_single(struct kse_mailbox *kmbx)
}
}

/* Remove the frame reference. */
curthread->curframe = NULL;

if (curthread->lock_switch == 0) {
/* Unlock the scheduling queue. */
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
@@ -925,7 +906,6 @@ kse_sched_multi(struct kse_mailbox *kmbx)
{
struct kse *curkse;
struct pthread *curthread, *td_wait;
struct pthread_sigframe *curframe;
int ret;

curkse = (struct kse *)kmbx->km_udata;
@@ -980,6 +960,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
* will be cleared.
*/
curthread->blocked = 1;
DBG_MSG("Running thread %p is now blocked in kernel.\n",
curthread);
}

/* Check for any unblocked threads in the kernel. */
@@ -1085,10 +1067,6 @@ kse_sched_multi(struct kse_mailbox *kmbx)
/* Mark the thread active. */
curthread->active = 1;

/* Remove the frame reference. */
curframe = curthread->curframe;
curthread->curframe = NULL;

/*
* The thread's current signal frame will only be NULL if it
* is being resumed after being blocked in the kernel. In
@@ -1096,7 +1074,7 @@ kse_sched_multi(struct kse_mailbox *kmbx)
* signals or needs a cancellation check, we need to add a
* signal frame to the thread's context.
*/
if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
if (curthread->lock_switch == 0 && curthread->state == PS_RUNNING &&
(curthread->check_pending != 0 ||
THR_NEED_ASYNC_CANCEL(curthread)) &&
!THR_IN_CRITICAL(curthread)) {
@@ -1136,10 +1114,10 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp)
DBG_MSG(">>> sig wrapper\n");
if (curthread->lock_switch)
PANIC("thr_resume_wrapper, lock_switch != 0\n");
thr_resume_check(curthread, ucp, NULL);
thr_resume_check(curthread, ucp);
errno = err_save;
_kse_critical_enter();
curkse = _get_curkse();
curkse = curthread->kse;
curthread->tcb->tcb_tmbx.tm_context = *ucp;
ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
if (ret != 0)
@@ -1149,10 +1127,9 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp)
}

static void
thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf)
thr_resume_check(struct pthread *curthread, ucontext_t *ucp)
{
_thr_sig_rundown(curthread, ucp, psf);
_thr_sig_rundown(curthread, ucp);

if (THR_NEED_ASYNC_CANCEL(curthread))
pthread_testcancel();
@@ -85,26 +85,26 @@ static void mutex_rescan_owned (struct pthread *, struct pthread *,
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);

static void mutex_lock_backout(void *arg);

static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);



int
_pthread_mutex_init(pthread_mutex_t *mutex,
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
struct pthread_mutex *pmutex;
@@ -206,6 +206,22 @@ _pthread_mutex_init(pthread_mutex_t *mutex,
return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
struct pthread_mutex_attr mattr, *mattrp;

if ((mutex_attr == NULL) || (*mutex_attr == NULL))
return (__pthread_mutex_init(mutex, &static_mattr));
else {
mattr = **mutex_attr;
mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
mattrp = &mattr;
return (__pthread_mutex_init(mutex, &mattrp));
}
}

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
@@ -303,6 +319,7 @@ init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
int private;
int ret = 0;

THR_ASSERT((mutex != NULL) && (*mutex != NULL),
@@ -310,6 +327,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)

/* Lock the mutex structure: */
THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

/*
* If the mutex was statically allocated, properly
@@ -417,6 +435,9 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
break;
}

if (ret == 0 && private)
THR_CRITICAL_ENTER(curthread);

/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

@@ -468,6 +489,7 @@ static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
const struct timespec * abstime)
{
int private;
int ret = 0;

THR_ASSERT((m != NULL) && (*m != NULL),
@@ -482,6 +504,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
curthread->timeout = 0;
curthread->wakeup_time.tv_sec = -1;

private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;

/*
* Enter a loop waiting to become the mutex owner. We need a
* loop in case the waiting thread is interrupted by a signal
@@ -516,6 +540,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
if (private)
THR_CRITICAL_ENTER(curthread);

/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -539,6 +565,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
curthread->sigbackout = mutex_lock_backout;
/*
* This thread is active and is in a critical
* region (holding the mutex lock); we should
@@ -554,12 +581,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/*
* Only clear these after assuring the
* thread is dequeued.
*/
curthread->data.mutex = NULL;
curthread->sigbackout = NULL;
}
break;

@@ -590,6 +622,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
if (private)
THR_CRITICAL_ENTER(curthread);

/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -613,6 +647,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
curthread->sigbackout = mutex_lock_backout;

/*
* This thread is active and is in a critical
@@ -633,12 +668,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/*
* Only clear these after assuring the
* thread is dequeued.
*/
curthread->data.mutex = NULL;
curthread->sigbackout = NULL;
}
break;

@@ -679,6 +719,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
MUTEX_ASSERT_NOT_OWNED(*m);
TAILQ_INSERT_TAIL(&curthread->mutexq,
(*m), m_qe);
if (private)
THR_CRITICAL_ENTER(curthread);

/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
@@ -702,6 +744,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
*/
mutex_queue_enq(*m, curthread);
curthread->data.mutex = *m;
curthread->sigbackout = mutex_lock_backout;

/* Clear any previous error: */
curthread->error = 0;
@@ -722,12 +765,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
/* Schedule the next thread: */
_thr_sched_switch(curthread);

curthread->data.mutex = NULL;
if (THR_IN_MUTEXQ(curthread)) {
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
mutex_queue_remove(*m, curthread);
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
}
/*
* Only clear these after assuring the
* thread is dequeued.
*/
curthread->data.mutex = NULL;
curthread->sigbackout = NULL;

/*
* The threads priority may have changed while
@@ -932,6 +980,13 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
int ret = 0;

/*
* Don't allow evil recursive mutexes for private use
* in libc and libpthread.
*/
if (m->m_flags & MUTEX_FLAGS_PRIVATE)
PANIC("Recurse on a private mutex.");

switch (m->m_type) {
/* case PTHREAD_MUTEX_DEFAULT: */
case PTHREAD_MUTEX_ERRORCHECK:
@@ -1135,8 +1190,13 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
/* Increment the reference count: */
(*m)->m_refcount++;

/* Leave the critical region if this is a private mutex. */
if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
THR_CRITICAL_LEAVE(curthread);

/* Unlock the mutex structure: */
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

if (kmbx != NULL)
kse_wakeup(kmbx);
}
@@ -1511,9 +1571,10 @@ _mutex_unlock_private(pthread_t pthread)
* This is called by the current thread when it wants to back out of a
* mutex_lock in order to run a signal handler.
*/
void
_mutex_lock_backout(struct pthread *curthread)
static void
mutex_lock_backout(void *arg)
{
struct pthread *curthread = (struct pthread *)arg;
struct pthread_mutex *m;

if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
@@ -1554,6 +1615,8 @@ _mutex_lock_backout(struct pthread *curthread)
THR_LOCK_RELEASE(curthread, &m->m_lock);
}
}
/* No need to call this again. */
curthread->sigbackout = NULL;
}

/*
@@ -1674,13 +1737,16 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
(pthread->active_priority > curthread->active_priority))
curthread->critical_yield = 1;

THR_SCHED_UNLOCK(curthread, pthread);
if (mutex->m_owner == pthread)
if (mutex->m_owner == pthread) {
/* We're done; a valid owner was found. */
if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
THR_CRITICAL_ENTER(pthread);
THR_SCHED_UNLOCK(curthread, pthread);
break;
else
/* Get the next thread from the waiting queue: */
pthread = TAILQ_NEXT(pthread, sqe);
}
THR_SCHED_UNLOCK(curthread, pthread);
/* Get the next thread from the waiting queue: */
pthread = TAILQ_NEXT(pthread, sqe);
}

if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
@@ -67,6 +67,7 @@ once_cancel_handler(void *arg)
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
struct pthread *curthread;
int wakeup = 0;

if (once_control->state == ONCE_DONE)
@@ -81,9 +82,10 @@ _pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) {
once_control->state = ONCE_IN_PROGRESS;
_pthread_mutex_unlock(&once_lock);
_pthread_cleanup_push(once_cancel_handler, once_control);
curthread = _get_curthread();
THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
init_routine();
_pthread_cleanup_pop(0);
THR_CLEANUP_POP(curthread, 0);
_pthread_mutex_lock(&once_lock);
once_control->state = ONCE_DONE;
wakeup = 1;
@@ -416,8 +416,24 @@ struct pthread_cleanup {
struct pthread_cleanup *next;
void (*routine) ();
void *routine_arg;
int onstack;
};

#define THR_CLEANUP_PUSH(td, func, arg) { \
struct pthread_cleanup __cup; \
\
__cup.routine = func; \
__cup.routine_arg = arg; \
__cup.onstack = 1; \
__cup.next = (td)->cleanup; \
(td)->cleanup = &__cup;

#define THR_CLEANUP_POP(td, exec) \
(td)->cleanup = __cup.next; \
if ((exec) != 0) \
__cup.routine(__cup.routine_arg); \
}

struct pthread_atfork {
TAILQ_ENTRY(pthread_atfork) qe;
void (*prepare)(void);
@@ -573,6 +589,7 @@ struct pthread_sigframe {
sigset_t psf_sigset;
sigset_t psf_sigmask;
int psf_seqno;
thread_continuation_t psf_continuation;
};

struct join_status {
@@ -645,8 +662,8 @@ struct pthread {
/*
* Used for tracking delivery of signal handlers.
*/
struct pthread_sigframe *curframe;
siginfo_t *siginfo;
thread_continuation_t sigbackout;

/*
* Cancelability flags - the lower 2 bits are used by cancel
@@ -1070,7 +1087,6 @@ SCLASS int _thr_debug_flags SCLASS_PRESET(0);
*/
__BEGIN_DECLS
int _cond_reinit(pthread_cond_t *);
void _cond_wait_backout(struct pthread *);
struct kse *_kse_alloc(struct pthread *, int sys_scope);
kse_critical_t _kse_critical_enter(void);
void _kse_critical_leave(kse_critical_t);
@@ -1085,7 +1101,6 @@ int _kse_setthreaded(int);
void _kseg_free(struct kse_group *);
int _mutex_cv_lock(pthread_mutex_t *);
int _mutex_cv_unlock(pthread_mutex_t *);
void _mutex_lock_backout(struct pthread *);
void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
int _mutex_reinit(struct pthread_mutex *);
void _mutex_unlock_private(struct pthread *);
@@ -1148,8 +1163,7 @@ void _thr_set_timeout(const struct timespec *);
void _thr_seterrno(struct pthread *, int);
void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
void _thr_sig_check_pending(struct pthread *);
void _thr_sig_rundown(struct pthread *, ucontext_t *,
struct pthread_sigframe *);
void _thr_sig_rundown(struct pthread *, ucontext_t *);
void _thr_sig_send(struct pthread *pthread, int sig);
void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
void _thr_spinlock_init(void);
@@ -123,7 +123,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
{
semid_t semid;

semid = SEM_USER;
semid = (semid_t)SEM_USER;
if ((pshared != 0) && (ksem_init(&semid, value) != 0))
return (-1);

@@ -145,8 +145,8 @@ _sem_wait(sem_t *sem)
if (sem_check_validity(sem) != 0)
return (-1);

curthread = _get_curthread();
if ((*sem)->syssem != 0) {
curthread = _get_curthread();
_thr_cancel_enter(curthread);
retval = ksem_wait((*sem)->semid);
_thr_cancel_leave(curthread, retval != 0);
@@ -157,9 +157,9 @@ _sem_wait(sem_t *sem)

while ((*sem)->count <= 0) {
(*sem)->nwaiters++;
pthread_cleanup_push(decrease_nwaiters, sem);
THR_CLEANUP_PUSH(curthread, decrease_nwaiters, sem);
pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
pthread_cleanup_pop(0);
THR_CLEANUP_POP(curthread, 0);
(*sem)->nwaiters--;
}
(*sem)->count--;
@@ -43,17 +43,15 @@
#include "thr_private.h"

/* Prototypes: */
static void build_siginfo(siginfo_t *info, int signo);
static inline void build_siginfo(siginfo_t *info, int signo);
#ifndef SYSTEM_SCOPE_ONLY
static struct pthread *thr_sig_find(struct kse *curkse, int sig,
siginfo_t *info);
static void handle_special_signals(struct kse *curkse, int sig);
#endif
static void thr_sigframe_add(struct pthread *thread);
static void thr_sigframe_restore(struct pthread *thread,
struct pthread_sigframe *psf);
static void thr_sigframe_save(struct pthread *thread,
struct pthread_sigframe *psf);
static inline void thr_sigframe_restore(struct pthread *thread,
struct pthread_sigframe *psf);
static inline void thr_sigframe_save(struct pthread *thread,
struct pthread_sigframe *psf);

#define SA_KILL 0x01 /* terminates process by default */
#define SA_STOP 0x02
@@ -254,9 +252,6 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)

DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);

/* Some signals need special handling: */
handle_special_signals(curkse, sig);

/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
@@ -306,11 +301,14 @@ typedef void (*ohandler)(int sig, int code,
void
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
struct pthread_sigframe psf;
__siginfohandler_t *sigfunc;
struct pthread *curthread;
struct kse *curkse;
struct sigaction act;
int sa_flags, err_save, intr_save, timeout_save;
int sa_flags, err_save;

err_save = errno;

DBG_MSG(">>> _thr_sig_handler(%d)\n", sig);
@@ -319,15 +317,18 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
PANIC("No current thread.\n");
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
PANIC("Thread is not system scope.\n");
if (curthread->flags & THR_FLAGS_EXITING)
if (curthread->flags & THR_FLAGS_EXITING) {
errno = err_save;
return;
}

curkse = _get_curkse();
/*
* If thread is in critical region or if thread is on
* the way of state transition, then latch signal into buffer.
*/
if (_kse_in_critical() || THR_IN_CRITICAL(curthread) ||
(curthread->state != PS_RUNNING && curthread->curframe == NULL)) {
curthread->state != PS_RUNNING) {
DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig);
curthread->siginfo[sig-1] = *info;
curthread->check_pending = 1;
@@ -341,18 +342,24 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
*/
if (KSE_IS_IDLE(curkse))
kse_wakeup(&curkse->k_kcb->kcb_kmbx);
errno = err_save;
return;
}

/* It is now safe to invoke signal handler */
err_save = errno;
timeout_save = curthread->timeout;
intr_save = curthread->interrupted;
/* Check if the signal requires a dump of thread information: */
if (sig == SIGINFO) {
/* Dump thread information to file: */
_thread_dump_info();
}

/* Check the threads previous state: */
curthread->critical_count++;
if (curthread->sigbackout != NULL)
curthread->sigbackout((void *)curthread);
curthread->critical_count--;
thr_sigframe_save(curthread, &psf);
THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared.");

_kse_critical_enter();
/* Get a fresh copy of signal mask */
__sys_sigprocmask(SIG_BLOCK, NULL, &curthread->sigmask);
@@ -395,14 +402,16 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
#endif
}
}
errno = err_save;
curthread->timeout = timeout_save;
curthread->interrupted = intr_save;
_kse_critical_enter();
curthread->sigmask = ucp->uc_sigmask;
SIG_CANTMASK(curthread->sigmask);
_kse_critical_leave(&curthread->tcb->tcb_tmbx);

thr_sigframe_restore(curthread, &psf);

DBG_MSG("<<< _thr_sig_handler(%d)\n", sig);

errno = err_save;
}

struct sighandle_info {
@@ -439,7 +448,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,

if (!_kse_in_critical())
PANIC("thr_sig_invoke_handler without in critical\n");
curkse = _get_curkse();
curkse = curthread->kse;
/*
* Check that a custom handler is installed and if
* the signal is not blocked:
@@ -491,7 +500,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,

_kse_critical_enter();
/* Don't trust after critical leave/enter */
curkse = _get_curkse();
curkse = curthread->kse;

/*
* Restore the thread's signal mask.
@@ -752,7 +761,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
}
#endif /* ! SYSTEM_SCOPE_ONLY */

static void
static inline void
build_siginfo(siginfo_t *info, int signo)
{
bzero(info, sizeof(*info));
@@ -765,54 +774,35 @@ build_siginfo(siginfo_t *info, int signo)
* It should only be called from the context of the thread.
*/
void
_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf)
_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp)
{
int interrupted = curthread->interrupted;
int timeout = curthread->timeout;
struct pthread_sigframe psf;
siginfo_t siginfo;
int i;
int i, err_save;
kse_critical_t crit;
struct kse *curkse;
sigset_t sigmask;

err_save = errno;

DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread);

/* Check the threads previous state: */
if ((psf != NULL) && (psf->psf_valid != 0)) {
/*
* Do a little cleanup handling for those threads in
* queues before calling the signal handler. Signals
* for these threads are temporarily blocked until
* after cleanup handling.
*/
switch (psf->psf_state) {
case PS_COND_WAIT:
_cond_wait_backout(curthread);
psf->psf_state = PS_RUNNING;
break;

case PS_MUTEX_WAIT:
_mutex_lock_backout(curthread);
psf->psf_state = PS_RUNNING;
break;

case PS_RUNNING:
break;
curthread->critical_count++;
if (curthread->sigbackout != NULL)
curthread->sigbackout((void *)curthread);
curthread->critical_count--;

default:
psf->psf_state = PS_RUNNING;
break;
}
/* XXX see comment in thr_sched_switch_unlocked */
curthread->critical_count--;
}
THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared.");
THR_ASSERT((curthread->state == PS_RUNNING), "state is not PS_RUNNING");

thr_sigframe_save(curthread, &psf);
/*
* Lower the priority before calling the handler in case
* it never returns (longjmps back):
*/
crit = _kse_critical_enter();
curkse = _get_curkse();
curkse = curthread->kse;
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
@@ -851,9 +841,8 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
}
}

if (psf != NULL && psf->psf_valid != 0)
thr_sigframe_restore(curthread, psf);
curkse = _get_curkse();
/* Don't trust after signal handling */
curkse = curthread->kse;
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
_kse_critical_leave(&curthread->tcb->tcb_tmbx);
@@ -875,10 +864,10 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
}
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}
curthread->interrupted = interrupted;
curthread->timeout = timeout;

DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread);

thr_sigframe_restore(curthread, &psf);
errno = err_save;
}

/*
@@ -897,7 +886,15 @@ _thr_sig_check_pending(struct pthread *curthread)
volatile int once;
int errsave;

if (THR_IN_CRITICAL(curthread))
/*
* If the thread is in critical region, delay processing signals.
* If the thread state is not PS_RUNNING, it might be switching
* into UTS and but a THR_LOCK_RELEASE saw check_pending, and it
* goes here, in the case we delay processing signals, lets UTS
* process complicated things, normally UTS will call _thr_sig_add
* to resume the thread, so we needn't repeat doing it here.
*/
if (THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING)
return;

errsave = errno;
@@ -906,42 +903,11 @@ _thr_sig_check_pending(struct pthread *curthread)
if (once == 0) {
once = 1;
curthread->check_pending = 0;
_thr_sig_rundown(curthread, &uc, NULL);
_thr_sig_rundown(curthread, &uc);
}
errno = errsave;
}

#ifndef SYSTEM_SCOPE_ONLY
/*
* This must be called with upcalls disabled.
*/
static void
handle_special_signals(struct kse *curkse, int sig)
{
switch (sig) {
/*
* POSIX says that pending SIGCONT signals are
* discarded when one of these signals occurs.
*/
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
SIGDELSET(_thr_proc_sigpending, SIGCONT);
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
break;
case SIGCONT:
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
SIGDELSET(_thr_proc_sigpending, SIGTSTP);
SIGDELSET(_thr_proc_sigpending, SIGTTIN);
SIGDELSET(_thr_proc_sigpending, SIGTTOU);
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
default:
break;
}
}
#endif /* ! SYSTEM_SCOPE_ONLY */

/*
* Perform thread specific actions in response to a signal.
* This function is only called if there is a handler installed
@@ -979,11 +945,9 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
return (NULL);
}

if (pthread->curframe == NULL ||
(pthread->state != PS_SIGWAIT &&
SIGISMEMBER(pthread->sigmask, sig)) ||
THR_IN_CRITICAL(pthread)) {
/* thread is running or signal was being masked */
if (pthread->state != PS_SIGWAIT &&
SIGISMEMBER(pthread->sigmask, sig)) {
/* signal is masked, just add signal to thread. */
if (!fromproc) {
SIGADDSET(pthread->sigpend, sig);
if (info == NULL)
@@ -996,19 +960,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
return (NULL);
SIGADDSET(pthread->sigpend, sig);
}
if (!SIGISMEMBER(pthread->sigmask, sig)) {
/* A quick path to exit process */
if (sigfunc == SIG_DFL && sigprop(sig) & SA_KILL) {
kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
/* Never reach */
}
pthread->check_pending = 1;
if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(pthread->blocked != 0) &&
!THR_IN_CRITICAL(pthread))
kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
}
}
else {
/* if process signal not exists, just return */
@@ -1049,7 +1000,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
/* Possible not in RUNQ and has curframe ? */
pthread->active_priority |= THR_SIGNAL_PRIORITY;
}
suppress_handler = 1;
break;
/*
* States which cannot be interrupted but still require the
@@ -1115,19 +1065,22 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
build_siginfo(&pthread->siginfo[sig-1], sig);
else if (info != &pthread->siginfo[sig-1])
memcpy(&pthread->siginfo[sig-1], info, sizeof(*info));

pthread->check_pending = 1;
if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
(pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
if (suppress_handler == 0) {
/*
* Setup a signal frame and save the current threads
* state:
*/
thr_sigframe_add(pthread);
if (pthread->flags & THR_FLAGS_IN_RUNQ)
THR_RUNQ_REMOVE(pthread);
pthread->active_priority |= THR_SIGNAL_PRIORITY;
kmbx = _thr_setrunnable_unlocked(pthread);
} else {
pthread->check_pending = 1;
if (pthread->state != PS_RUNNING) {
if (pthread->flags & THR_FLAGS_IN_RUNQ)
THR_RUNQ_REMOVE(pthread);
pthread->active_priority |= THR_SIGNAL_PRIORITY;
kmbx = _thr_setrunnable_unlocked(pthread);
}
}
}
return (kmbx);
@@ -1151,6 +1104,10 @@ _thr_sig_send(struct pthread *pthread, int sig)
THR_SCHED_LOCK(curthread, pthread);
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
kmbx = _thr_sig_add(pthread, sig, NULL);
/* Add a preemption point. */
if (kmbx == NULL && (curthread->kseg == pthread->kseg) &&
(pthread->active_priority > curthread->active_priority))
curthread->critical_yield = 1;
THR_SCHED_UNLOCK(curthread, pthread);
if (kmbx != NULL)
kse_wakeup(kmbx);
@ -1161,52 +1118,55 @@ _thr_sig_send(struct pthread *pthread, int sig)
|
||||
*/
|
||||
if (pthread == curthread && curthread->check_pending)
|
||||
_thr_sig_check_pending(curthread);
|
||||
|
||||
} else {
|
||||
THR_SCHED_UNLOCK(curthread, pthread);
|
||||
}
|
||||
}
|
||||
|
||||
static void
thr_sigframe_add(struct pthread *thread)
static inline void
thr_sigframe_restore(struct pthread *curthread, struct pthread_sigframe *psf)
{
if (thread->curframe == NULL)
PANIC("Thread doesn't have signal frame ");
kse_critical_t crit;
struct kse *curkse;

if (thread->curframe->psf_valid == 0) {
thread->curframe->psf_valid = 1;
/*
* Multiple signals can be added to the same signal
* frame. Only save the thread's state the first time.
*/
thr_sigframe_save(thread, thread->curframe);
}
THR_THREAD_LOCK(curthread, curthread);
curthread->cancelflags = psf->psf_cancelflags;
crit = _kse_critical_enter();
curkse = curthread->kse;
KSE_SCHED_LOCK(curkse, curthread->kseg);
curthread->flags = psf->psf_flags;
curthread->interrupted = psf->psf_interrupted;
curthread->timeout = psf->psf_timeout;
curthread->data = psf->psf_wait_data;
curthread->wakeup_time = psf->psf_wakeup_time;
curthread->continuation = psf->psf_continuation;
KSE_SCHED_UNLOCK(curkse, curthread->kseg);
_kse_critical_leave(crit);
THR_THREAD_UNLOCK(curthread, curthread);
}

static void
thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf)
static inline void
thr_sigframe_save(struct pthread *curthread, struct pthread_sigframe *psf)
{
if (psf->psf_valid == 0)
PANIC("invalid pthread_sigframe\n");
thread->flags = psf->psf_flags;
thread->cancelflags = psf->psf_cancelflags;
thread->interrupted = psf->psf_interrupted;
thread->timeout = psf->psf_timeout;
thread->state = psf->psf_state;
thread->data = psf->psf_wait_data;
thread->wakeup_time = psf->psf_wakeup_time;
}
kse_critical_t crit;
struct kse *curkse;

static void
thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf)
{
THR_THREAD_LOCK(curthread, curthread);
psf->psf_cancelflags = curthread->cancelflags;
crit = _kse_critical_enter();
curkse = curthread->kse;
KSE_SCHED_LOCK(curkse, curthread->kseg);
/* This has to initialize all members of the sigframe. */
psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE;
psf->psf_cancelflags = thread->cancelflags;
psf->psf_interrupted = thread->interrupted;
psf->psf_timeout = thread->timeout;
psf->psf_state = thread->state;
psf->psf_wait_data = thread->data;
psf->psf_wakeup_time = thread->wakeup_time;
psf->psf_flags = (curthread->flags & (THR_FLAGS_PRIVATE | THR_FLAGS_EXITING));
psf->psf_interrupted = curthread->interrupted;
psf->psf_timeout = curthread->timeout;
psf->psf_wait_data = curthread->data;
psf->psf_wakeup_time = curthread->wakeup_time;
psf->psf_continuation = curthread->continuation;
KSE_SCHED_UNLOCK(curkse, curthread->kseg);
_kse_critical_leave(crit);
THR_THREAD_UNLOCK(curthread, curthread);
}
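
The rewritten thr_sigframe_save()/thr_sigframe_restore() pair snapshots and puts back exactly the per-thread fields a signal handler may clobber (flags, interrupted, timeout, wait data, wakeup time, continuation). A self-contained toy version of that snapshot/restore pattern; the struct and function names here are hypothetical stand-ins, and the real code operates on struct pthread under the KSE scheduler lock:

	#include <stdio.h>

	/* Hypothetical stand-ins for the fields the diff snapshots. */
	struct toy_thread {
		int	interrupted;
		int	timeout;
	};

	struct toy_sigframe {
		struct toy_thread saved;
	};

	static void
	toy_sigframe_save(struct toy_sigframe *psf, const struct toy_thread *td)
	{
		psf->saved = *td;	/* like thr_sigframe_save() */
	}

	static void
	toy_sigframe_restore(const struct toy_sigframe *psf, struct toy_thread *td)
	{
		*td = psf->saved;	/* like thr_sigframe_restore() */
	}

	static void
	toy_handler(struct toy_thread *td)
	{
		/* A handler may interrupt waits and clobber timeout state. */
		td->interrupted = 1;
		td->timeout = 1;
	}

	int
	main(void)
	{
		struct toy_thread td = { 0, 0 };
		struct toy_sigframe psf;

		toy_sigframe_save(&psf, &td);
		toy_handler(&td);
		toy_sigframe_restore(&psf, &td);
		printf("interrupted=%d timeout=%d\n", td.interrupted, td.timeout);
		return (0);
	}

Bracketing handler delivery this way is what lets the handler's side effects be discarded instead of leaking back into the interrupted wait.
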
void
@ -1266,6 +1226,9 @@ _thr_signal_deinit(void)
int i;
struct pthread *curthread = _get_curthread();

/* Clear process pending signals. */
sigemptyset(&_thr_proc_sigpending);

/* Enter a loop to get the existing signal status: */
for (i = 1; i <= _SIG_MAXSIG; i++) {
/* Check for signals which cannot be trapped: */
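
_thr_signal_deinit() clears the process-pending set and then walks every signal number to pick up the existing dispositions. A small illustrative program doing a comparable walk with the portable sigaction() query interface (an example only, not part of this commit; NSIG here covers just the classic signals, unlike _SIG_MAXSIG above):

	#include <signal.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct sigaction act;
		int sig;

		/* Query the disposition of every signal, similar in
		 * spirit to the deinit loop above. */
		for (sig = 1; sig < NSIG; sig++) {
			if (sigaction(sig, NULL, &act) != 0)
				continue;	/* invalid signal number */
			if (act.sa_handler != SIG_DFL &&
			    act.sa_handler != SIG_IGN)
				printf("signal %d has a custom handler\n", sig);
		}
		return (0);
	}
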
@ -69,6 +69,7 @@ _sigsuspend(const sigset_t *set)
/* Wait for a signal: */
_thr_sched_switch_unlocked(curthread);
} else {
curthread->check_pending = 1;
THR_UNLOCK_SWITCH(curthread);
/* Check for pending signals I can handle: */
_thr_sig_check_pending(curthread);
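
The _sigsuspend() hunk defers signals that cannot be handled during the switch and runs them down afterwards. At the application level, the race-free wait that sigsuspend(2) is meant to provide looks roughly like this (standard API usage, independent of the libpthread internals above):

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static volatile sig_atomic_t seen;

	static void
	on_usr1(int sig)
	{
		seen = sig;
	}

	int
	main(void)
	{
		struct sigaction sa;
		sigset_t block, waitmask;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = on_usr1;
		sigaction(SIGUSR1, &sa, NULL);

		/* Block the signal first so check-then-wait cannot race. */
		sigemptyset(&block);
		sigaddset(&block, SIGUSR1);
		sigprocmask(SIG_BLOCK, &block, &waitmask);
		sigdelset(&waitmask, SIGUSR1);

		if (fork() == 0) {
			kill(getppid(), SIGUSR1);
			_exit(0);
		}

		while (!seen)
			sigsuspend(&waitmask);	/* atomically unmask and wait */
		printf("woke up on signal %d\n", (int)seen);
		return (0);
	}
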
@ -49,6 +49,10 @@ struct spinlock_extra {

static void init_spinlock(spinlock_t *lck);

static struct pthread_mutex_attr static_mutex_attr =
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

static pthread_mutex_t spinlock_static_lock;
static struct spinlock_extra extra[MAX_SPINLOCKS];
static int spinlock_count = 0;
@ -65,7 +69,7 @@ _spinunlock(spinlock_t *lck)
struct spinlock_extra *extra;

extra = (struct spinlock_extra *)lck->fname;
pthread_mutex_unlock(&extra->lock);
_pthread_mutex_unlock(&extra->lock);
}

/*
@ -90,7 +94,7 @@ _spinlock(spinlock_t *lck)
if (lck->fname == NULL)
init_spinlock(lck);
extra = (struct spinlock_extra *)lck->fname;
pthread_mutex_lock(&extra->lock);
_pthread_mutex_lock(&extra->lock);
}

/*
@ -112,13 +116,13 @@ _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
static void
init_spinlock(spinlock_t *lck)
{
pthread_mutex_lock(&spinlock_static_lock);
_pthread_mutex_lock(&spinlock_static_lock);
if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
lck->fname = (char *)&extra[spinlock_count];
extra[spinlock_count].owner = lck;
spinlock_count++;
}
pthread_mutex_unlock(&spinlock_static_lock);
_pthread_mutex_unlock(&spinlock_static_lock);
if (lck->fname == NULL)
PANIC("Exceeded max spinlocks");
}
@ -133,10 +137,10 @@ _thr_spinlock_init(void)
for (i = 0; i < spinlock_count; i++)
_thr_mutex_reinit(&extra[i].lock);
} else {
if (pthread_mutex_init(&spinlock_static_lock, NULL))
if (_pthread_mutex_init(&spinlock_static_lock, &static_mattr))
PANIC("Cannot initialize spinlock_static_lock");
for (i = 0; i < MAX_SPINLOCKS; i++) {
if (pthread_mutex_init(&extra[i].lock, NULL))
if (_pthread_mutex_init(&extra[i].lock, &static_mattr))
PANIC("Cannot initialize spinlock extra");
}
initialized = 1;
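
With this change every libc spinlock is backed by a private mutex taken from the static extra[] pool, with lck->fname reused as the back-pointer to the pool slot. A minimal stand-alone sketch of that handle-to-pool mapping, assuming plain POSIX mutexes (the real private mutexes also defer signal delivery while held, which this sketch does not model; all names here are hypothetical):

	#include <pthread.h>
	#include <stdio.h>

	#define	MAX_LOCKS	20

	struct lock_extra {
		void		*owner;
		pthread_mutex_t	lock;
	};

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct lock_extra pool[MAX_LOCKS];
	static int pool_count;

	/*
	 * Lazily bind an opaque handle to a pool slot, as init_spinlock()
	 * binds lck->fname to an entry of extra[].  No overflow handling,
	 * unlike the PANIC above.
	 */
	static struct lock_extra *
	get_extra(void **handle)
	{
		pthread_mutex_lock(&pool_lock);
		if (*handle == NULL && pool_count < MAX_LOCKS) {
			pthread_mutex_init(&pool[pool_count].lock, NULL);
			pool[pool_count].owner = handle;
			*handle = &pool[pool_count];
			pool_count++;
		}
		pthread_mutex_unlock(&pool_lock);
		return (*handle);
	}

	int
	main(void)
	{
		void *handle = NULL;	/* plays the role of lck->fname */
		struct lock_extra *e;

		e = get_extra(&handle);
		pthread_mutex_lock(&e->lock);	/* what _spinlock() now does */
		puts("locked via pooled mutex");
		pthread_mutex_unlock(&e->lock);	/* what _spinunlock() now does */
		return (0);
	}

Keeping the pool static avoids allocating from the lock path itself, presumably because these locks guard allocator internals that malloc() depends on.
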
@ -14,6 +14,7 @@ global:
__poll;
__pthread_cond_timedwait;
__pthread_cond_wait;
__pthread_mutex_init;
__pthread_mutex_lock;
__pthread_mutex_trylock;
__pthread_mutex_timedlock;
|
@ -37,6 +37,7 @@
|
||||
#include "libc_private.h"
|
||||
#include "thr_private.h"
|
||||
|
||||
#undef errno
|
||||
extern int errno;
|
||||
|
||||
int *
|
||||
|
@ -14,18 +14,26 @@ __weak_reference(_pthread_testcancel, pthread_testcancel);
|
||||
static inline int
|
||||
checkcancel(struct pthread *curthread)
|
||||
{
|
||||
if (((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) &&
|
||||
((curthread->cancelflags & THR_CANCELLING) != 0)) {
|
||||
if ((curthread->cancelflags & THR_CANCELLING) != 0) {
|
||||
/*
|
||||
* It is possible for this thread to be swapped out
|
||||
* while performing cancellation; do not allow it
|
||||
* to be cancelled again.
|
||||
*/
|
||||
curthread->cancelflags &= ~THR_CANCELLING;
|
||||
return (1);
|
||||
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
|
||||
/*
|
||||
* This may happen once, but after this, it
|
||||
* shouldn't happen again.
|
||||
*/
|
||||
curthread->cancelflags &= ~THR_CANCELLING;
|
||||
return (0);
|
||||
}
|
||||
if ((curthread->cancelflags & PTHREAD_CANCEL_DISABLE) == 0) {
|
||||
curthread->cancelflags &= ~THR_CANCELLING;
|
||||
return (1);
|
||||
}
|
||||
}
|
||||
else
|
||||
return (0);
|
||||
return (0);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -50,6 +50,7 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
|
||||
malloc(sizeof(struct pthread_cleanup))) != NULL) {
|
||||
new->routine = routine;
|
||||
new->routine_arg = routine_arg;
|
||||
new->onstack = 0;
|
||||
new->next = curthread->cleanup;
|
||||
|
||||
curthread->cleanup = new;
|
||||
@ -67,6 +68,7 @@ _pthread_cleanup_pop(int execute)
|
||||
if (execute) {
|
||||
old->routine(old->routine_arg);
|
||||
}
|
||||
free(old);
|
||||
if (old->onstack == 0)
|
||||
free(old);
|
||||
}
|
||||
}
|
||||
|
@ -84,6 +84,13 @@ _thr_setconcurrency(int new_level)
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Turn on threaded mode, if failed, it is unnecessary to
|
||||
* do further work.
|
||||
*/
|
||||
if (_kse_isthreaded() == 0 && _kse_setthreaded(1))
|
||||
return (EAGAIN);
|
||||
|
||||
ret = 0;
|
||||
curthread = _get_curthread();
|
||||
/* Race condition, but so what. */
|
||||
|
@ -47,6 +47,9 @@
|
||||
static inline struct pthread *cond_queue_deq(pthread_cond_t);
|
||||
static inline void cond_queue_remove(pthread_cond_t, pthread_t);
|
||||
static inline void cond_queue_enq(pthread_cond_t, pthread_t);
|
||||
static void cond_wait_backout(void *);
|
||||
static inline void check_continuation(struct pthread *,
|
||||
struct pthread_cond *, pthread_mutex_t *);
|
||||
|
||||
/*
|
||||
* Double underscore versions are cancellation points. Single underscore
|
||||
@ -171,8 +174,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
struct pthread *curthread = _get_curthread();
|
||||
int rval = 0;
|
||||
int done = 0;
|
||||
int interrupted = 0;
|
||||
int unlock_mutex = 1;
|
||||
int mutex_locked = 1;
|
||||
int seqno;
|
||||
|
||||
if (cond == NULL)
|
||||
@ -198,10 +200,11 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
* and backed out of the waiting queue prior to executing the
|
||||
* signal handler.
|
||||
*/
|
||||
do {
|
||||
/* Lock the condition variable structure: */
|
||||
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Lock the condition variable structure: */
|
||||
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
|
||||
seqno = (*cond)->c_seqno;
|
||||
do {
|
||||
/*
|
||||
* If the condvar was statically allocated, properly
|
||||
* initialize the tail queue.
|
||||
@ -217,9 +220,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
case COND_TYPE_FAST:
|
||||
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
|
||||
((*cond)->c_mutex != *mutex))) {
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Return invalid argument error: */
|
||||
rval = EINVAL;
|
||||
} else {
|
||||
@ -233,15 +233,11 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
*/
|
||||
cond_queue_enq(*cond, curthread);
|
||||
|
||||
/* Remember the mutex and sequence number: */
|
||||
(*cond)->c_mutex = *mutex;
|
||||
seqno = (*cond)->c_seqno;
|
||||
|
||||
/* Wait forever: */
|
||||
curthread->wakeup_time.tv_sec = -1;
|
||||
|
||||
/* Unlock the mutex: */
|
||||
if ((unlock_mutex != 0) &&
|
||||
if (mutex_locked &&
|
||||
((rval = _mutex_cv_unlock(mutex)) != 0)) {
|
||||
/*
|
||||
* Cannot unlock the mutex, so remove
|
||||
@ -249,22 +245,18 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
* variable queue:
|
||||
*/
|
||||
cond_queue_remove(*cond, curthread);
|
||||
|
||||
/* Check for no more waiters: */
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
}
|
||||
else {
|
||||
/* Remember the mutex: */
|
||||
(*cond)->c_mutex = *mutex;
|
||||
|
||||
/*
|
||||
* Don't unlock the mutex the next
|
||||
* time through the loop (if the
|
||||
* thread has to be requeued after
|
||||
* handling a signal).
|
||||
*/
|
||||
unlock_mutex = 0;
|
||||
mutex_locked = 0;
|
||||
|
||||
/*
|
||||
* This thread is active and is in a
|
||||
@ -277,6 +269,7 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
|
||||
/* Remember the CV: */
|
||||
curthread->data.cond = *cond;
|
||||
curthread->sigbackout = cond_wait_backout;
|
||||
THR_SCHED_UNLOCK(curthread, curthread);
|
||||
|
||||
/* Unlock the CV structure: */
|
||||
@ -286,8 +279,6 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.cond = NULL;
|
||||
|
||||
/*
|
||||
* XXX - This really isn't a good check
|
||||
* since there can be more than one
|
||||
@ -299,41 +290,39 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
* should be sent "as soon as possible".
|
||||
*/
|
||||
done = (seqno != (*cond)->c_seqno);
|
||||
|
||||
if (THR_IN_SYNCQ(curthread)) {
|
||||
if (done && !THR_IN_CONDQ(curthread)) {
|
||||
/*
|
||||
* Lock the condition variable
|
||||
* while removing the thread.
|
||||
* The thread is dequeued, so
|
||||
* it is safe to clear these.
|
||||
*/
|
||||
THR_LOCK_ACQUIRE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
curthread->data.cond = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
check_continuation(curthread,
|
||||
NULL, mutex);
|
||||
return (_mutex_cv_lock(mutex));
|
||||
}
|
||||
|
||||
/* Relock the CV structure: */
|
||||
THR_LOCK_ACQUIRE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
|
||||
/*
|
||||
* Clear these after taking the lock to
|
||||
* prevent a race condition where a
|
||||
* signal can arrive before dequeueing
|
||||
* the thread.
|
||||
*/
|
||||
curthread->data.cond = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
done = (seqno != (*cond)->c_seqno);
|
||||
|
||||
if (THR_IN_CONDQ(curthread)) {
|
||||
cond_queue_remove(*cond,
|
||||
curthread);
|
||||
|
||||
/* Check for no more waiters: */
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
THR_LOCK_RELEASE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the interrupted flag; locking
|
||||
* the mutex may destroy it.
|
||||
*/
|
||||
interrupted = curthread->interrupted;
|
||||
|
||||
/*
|
||||
* Note that even though this thread may
|
||||
* have been canceled, POSIX requires
|
||||
* that the mutex be reaquired prior to
|
||||
* cancellation.
|
||||
*/
|
||||
if (done || interrupted) {
|
||||
rval = _mutex_cv_lock(mutex);
|
||||
unlock_mutex = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -341,18 +330,21 @@ _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
|
||||
/* Trap invalid condition variable types: */
|
||||
default:
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Return an invalid argument error: */
|
||||
rval = EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((interrupted != 0) && (curthread->continuation != NULL))
|
||||
curthread->continuation((void *) curthread);
|
||||
check_continuation(curthread, *cond,
|
||||
mutex_locked ? NULL : mutex);
|
||||
} while ((done == 0) && (rval == 0));
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
if (mutex_locked == 0)
|
||||
_mutex_cv_lock(mutex);
|
||||
|
||||
/* Return the completion status: */
|
||||
return (rval);
|
||||
}
|
||||
@ -378,8 +370,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
struct pthread *curthread = _get_curthread();
|
||||
int rval = 0;
|
||||
int done = 0;
|
||||
int interrupted = 0;
|
||||
int unlock_mutex = 1;
|
||||
int mutex_locked = 1;
|
||||
int seqno;
|
||||
|
||||
THR_ASSERT(curthread->locklevel == 0,
|
||||
@ -407,10 +398,11 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
* and backed out of the waiting queue prior to executing the
|
||||
* signal handler.
|
||||
*/
|
||||
do {
|
||||
/* Lock the condition variable structure: */
|
||||
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Lock the condition variable structure: */
|
||||
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
|
||||
seqno = (*cond)->c_seqno;
|
||||
do {
|
||||
/*
|
||||
* If the condvar was statically allocated, properly
|
||||
* initialize the tail queue.
|
||||
@ -428,9 +420,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
((*cond)->c_mutex != *mutex))) {
|
||||
/* Return invalid argument error: */
|
||||
rval = EINVAL;
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
} else {
|
||||
/* Set the wakeup time: */
|
||||
curthread->wakeup_time.tv_sec = abstime->tv_sec;
|
||||
@ -447,12 +436,8 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
*/
|
||||
cond_queue_enq(*cond, curthread);
|
||||
|
||||
/* Remember the mutex and sequence number: */
|
||||
(*cond)->c_mutex = *mutex;
|
||||
seqno = (*cond)->c_seqno;
|
||||
|
||||
/* Unlock the mutex: */
|
||||
if ((unlock_mutex != 0) &&
|
||||
if (mutex_locked &&
|
||||
((rval = _mutex_cv_unlock(mutex)) != 0)) {
|
||||
/*
|
||||
* Cannot unlock the mutex; remove the
|
||||
@ -460,21 +445,17 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
* variable queue:
|
||||
*/
|
||||
cond_queue_remove(*cond, curthread);
|
||||
|
||||
/* Check for no more waiters: */
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
} else {
|
||||
/* Remember the mutex: */
|
||||
(*cond)->c_mutex = *mutex;
|
||||
|
||||
/*
|
||||
* Don't unlock the mutex the next
|
||||
* time through the loop (if the
|
||||
* thread has to be requeued after
|
||||
* handling a signal).
|
||||
*/
|
||||
unlock_mutex = 0;
|
||||
mutex_locked = 0;
|
||||
|
||||
/*
|
||||
* This thread is active and is in a
|
||||
@ -487,6 +468,7 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
|
||||
/* Remember the CV: */
|
||||
curthread->data.cond = *cond;
|
||||
curthread->sigbackout = cond_wait_backout;
|
||||
THR_SCHED_UNLOCK(curthread, curthread);
|
||||
|
||||
/* Unlock the CV structure: */
|
||||
@ -496,8 +478,6 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.cond = NULL;
|
||||
|
||||
/*
|
||||
* XXX - This really isn't a good check
|
||||
* since there can be more than one
|
||||
@ -509,38 +489,45 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
* should be sent "as soon as possible".
|
||||
*/
|
||||
done = (seqno != (*cond)->c_seqno);
|
||||
if (done && !THR_IN_CONDQ(curthread)) {
|
||||
/*
|
||||
* The thread is dequeued, so
|
||||
* it is safe to clear these.
|
||||
*/
|
||||
curthread->data.cond = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
check_continuation(curthread,
|
||||
NULL, mutex);
|
||||
return (_mutex_cv_lock(mutex));
|
||||
}
|
||||
|
||||
/* Relock the CV structure: */
|
||||
THR_LOCK_ACQUIRE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
|
||||
/*
|
||||
* Clear these after taking the lock to
|
||||
* prevent a race condition where a
|
||||
* signal can arrive before dequeueing
|
||||
* the thread.
|
||||
*/
|
||||
curthread->data.cond = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
|
||||
done = (seqno != (*cond)->c_seqno);
|
||||
|
||||
if (THR_IN_CONDQ(curthread)) {
|
||||
/*
|
||||
* Lock the condition variable
|
||||
* while removing the thread.
|
||||
*/
|
||||
THR_LOCK_ACQUIRE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
|
||||
cond_queue_remove(*cond,
|
||||
curthread);
|
||||
|
||||
/* Check for no more waiters: */
|
||||
if (TAILQ_FIRST(&(*cond)->c_queue) == NULL)
|
||||
(*cond)->c_mutex = NULL;
|
||||
|
||||
THR_LOCK_RELEASE(curthread,
|
||||
&(*cond)->c_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the interrupted flag; locking
|
||||
* the mutex may destroy it.
|
||||
*/
|
||||
interrupted = curthread->interrupted;
|
||||
if (curthread->timeout != 0) {
|
||||
/* The wait timedout. */
|
||||
rval = ETIMEDOUT;
|
||||
(void)_mutex_cv_lock(mutex);
|
||||
} else if (interrupted || done) {
|
||||
rval = _mutex_cv_lock(mutex);
|
||||
unlock_mutex = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -548,18 +535,21 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
|
||||
|
||||
/* Trap invalid condition variable types: */
|
||||
default:
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
/* Return an invalid argument error: */
|
||||
rval = EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((interrupted != 0) && (curthread->continuation != NULL))
|
||||
curthread->continuation((void *)curthread);
|
||||
check_continuation(curthread, *cond,
|
||||
mutex_locked ? NULL : mutex);
|
||||
} while ((done == 0) && (rval == 0));
|
||||
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
|
||||
|
||||
if (mutex_locked == 0)
|
||||
_mutex_cv_lock(mutex);
|
||||
|
||||
/* Return the completion status: */
|
||||
return (rval);
|
||||
}
|
||||
@ -615,6 +605,7 @@ _pthread_cond_signal(pthread_cond_t * cond)
|
||||
!= NULL) {
|
||||
THR_SCHED_LOCK(curthread, pthread);
|
||||
cond_queue_remove(*cond, pthread);
|
||||
pthread->sigbackout = NULL;
|
||||
if ((pthread->kseg == curthread->kseg) &&
|
||||
(pthread->active_priority >
|
||||
curthread->active_priority))
|
||||
@ -681,6 +672,7 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
|
||||
!= NULL) {
|
||||
THR_SCHED_LOCK(curthread, pthread);
|
||||
cond_queue_remove(*cond, pthread);
|
||||
pthread->sigbackout = NULL;
|
||||
if ((pthread->kseg == curthread->kseg) &&
|
||||
(pthread->active_priority >
|
||||
curthread->active_priority))
|
||||
@ -712,9 +704,31 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
|
||||
|
||||
__strong_reference(_pthread_cond_broadcast, _thr_cond_broadcast);
|
||||
|
||||
void
|
||||
_cond_wait_backout(struct pthread *curthread)
|
||||
static inline void
|
||||
check_continuation(struct pthread *curthread, struct pthread_cond *cond,
|
||||
pthread_mutex_t *mutex)
|
||||
{
|
||||
if ((curthread->interrupted != 0) &&
|
||||
(curthread->continuation != NULL)) {
|
||||
if (cond != NULL)
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &cond->c_lock);
|
||||
/*
|
||||
* Note that even though this thread may have been
|
||||
* canceled, POSIX requires that the mutex be
|
||||
* reaquired prior to cancellation.
|
||||
*/
|
||||
if (mutex != NULL)
|
||||
_mutex_cv_lock(mutex);
|
||||
curthread->continuation((void *) curthread);
|
||||
PANIC("continuation returned in pthread_cond_wait.\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
cond_wait_backout(void *arg)
|
||||
{
|
||||
struct pthread *curthread = (struct pthread *)arg;
|
||||
pthread_cond_t cond;
|
||||
|
||||
cond = curthread->data.cond;
|
||||
@ -740,6 +754,8 @@ _cond_wait_backout(struct pthread *curthread)
|
||||
/* Unlock the condition variable structure: */
|
||||
THR_LOCK_RELEASE(curthread, &cond->c_lock);
|
||||
}
|
||||
/* No need to call this again. */
|
||||
curthread->sigbackout = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -171,9 +171,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
|
||||
/* No thread is wanting to join to this one: */
|
||||
new_thread->joiner = NULL;
|
||||
|
||||
/* Initialize the signal frame: */
|
||||
new_thread->curframe = NULL;
|
||||
|
||||
/*
|
||||
* Initialize the machine context.
|
||||
* Enter a critical region to get consistent context.
|
||||
@ -235,6 +232,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
|
||||
new_thread->cleanup = NULL;
|
||||
new_thread->flags = 0;
|
||||
new_thread->tlflags = 0;
|
||||
new_thread->sigbackout = NULL;
|
||||
new_thread->continuation = NULL;
|
||||
new_thread->wakeup_time.tv_sec = -1;
|
||||
new_thread->lock_switch = 0;
|
||||
|
@ -43,12 +43,6 @@
|
||||
#include "libc_private.h"
|
||||
#include "thr_private.h"
|
||||
|
||||
/*
|
||||
* For a while, allow libpthread to work with a libc that doesn't
|
||||
* export the malloc lock.
|
||||
*/
|
||||
#pragma weak __malloc_lock
|
||||
|
||||
__weak_reference(_fork, fork);
|
||||
|
||||
pid_t
|
||||
@ -60,11 +54,21 @@ _fork(void)
|
||||
pid_t ret;
|
||||
int errsave;
|
||||
|
||||
if (!_kse_isthreaded())
|
||||
return (__sys_fork());
|
||||
|
||||
curthread = _get_curthread();
|
||||
|
||||
if (!_kse_isthreaded()) {
|
||||
SIGFILLSET(sigset);
|
||||
__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
|
||||
ret = __sys_fork();
|
||||
if (ret == 0)
|
||||
/* Child */
|
||||
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
|
||||
NULL);
|
||||
else
|
||||
__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Masks all signals until we reach a safe point in
|
||||
* _kse_single_thread, and the signal masks will be
|
||||
@ -86,7 +90,7 @@ _fork(void)
|
||||
}
|
||||
|
||||
/* Fork a new process: */
|
||||
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
|
||||
if (_kse_isthreaded() != 0) {
|
||||
_spinlock(__malloc_lock);
|
||||
}
|
||||
if ((ret = __sys_fork()) == 0) {
|
||||
|
@ -391,6 +391,7 @@ init_main_thread(struct pthread *thread)
|
||||
thread->specific = NULL;
|
||||
thread->cleanup = NULL;
|
||||
thread->flags = 0;
|
||||
thread->sigbackout = NULL;
|
||||
thread->continuation = NULL;
|
||||
|
||||
thread->state = PS_RUNNING;
|
||||
|
@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include "thr_private.h"
|
||||
#include "libc_private.h"
|
||||
|
||||
/*#define DEBUG_THREAD_KERN */
|
||||
/* #define DEBUG_THREAD_KERN */
|
||||
#ifdef DEBUG_THREAD_KERN
|
||||
#define DBG_MSG stdout_debug
|
||||
#else
|
||||
@ -165,8 +165,7 @@ static struct kse_mailbox *kse_wakeup_one(struct pthread *thread);
|
||||
static void thr_cleanup(struct kse *kse, struct pthread *curthread);
|
||||
static void thr_link(struct pthread *thread);
|
||||
static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *);
|
||||
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
|
||||
struct pthread_sigframe *psf);
|
||||
static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp);
|
||||
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
|
||||
static void thr_unlink(struct pthread *thread);
|
||||
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
|
||||
@ -352,6 +351,9 @@ _kse_single_thread(struct pthread *curthread)
|
||||
curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL;
|
||||
curthread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
|
||||
|
||||
/* After a fork(), there child should have no pending signals. */
|
||||
sigemptyset(&curthread->sigpend);
|
||||
|
||||
/*
|
||||
* Restore signal mask early, so any memory problems could
|
||||
* dump core.
|
||||
@ -615,13 +617,12 @@ _thr_sched_switch(struct pthread *curthread)
|
||||
void
|
||||
_thr_sched_switch_unlocked(struct pthread *curthread)
|
||||
{
|
||||
struct pthread_sigframe psf;
|
||||
struct kse *curkse;
|
||||
volatile int resume_once = 0;
|
||||
ucontext_t *uc;
|
||||
|
||||
/* We're in the scheduler, 5 by 5: */
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
|
||||
curthread->need_switchout = 1; /* The thread yielded on its own. */
|
||||
curthread->critical_yield = 0; /* No need to yield anymore. */
|
||||
@ -629,14 +630,6 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
|
||||
/* Thread can unlock the scheduler lock. */
|
||||
curthread->lock_switch = 1;
|
||||
|
||||
/*
|
||||
* The signal frame is allocated off the stack because
|
||||
* a thread can be interrupted by other signals while
|
||||
* it is running down pending signals.
|
||||
*/
|
||||
psf.psf_valid = 0;
|
||||
curthread->curframe = &psf;
|
||||
|
||||
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
|
||||
kse_sched_single(&curkse->k_kcb->kcb_kmbx);
|
||||
else {
|
||||
@ -657,19 +650,12 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
|
||||
_thread_enter_uts(curthread->tcb, curkse->k_kcb);
|
||||
}
|
||||
|
||||
/*
|
||||
* It is ugly we must increase critical count, because we
|
||||
* have a frame saved, we must backout state in psf
|
||||
* before we can process signals.
|
||||
*/
|
||||
curthread->critical_count += psf.psf_valid;
|
||||
|
||||
/*
|
||||
* Unlock the scheduling queue and leave the
|
||||
* critical region.
|
||||
*/
|
||||
/* Don't trust this after a switch! */
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
|
||||
curthread->lock_switch = 0;
|
||||
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
|
||||
@ -678,16 +664,14 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
|
||||
/*
|
||||
* This thread is being resumed; check for cancellations.
|
||||
*/
|
||||
if ((psf.psf_valid ||
|
||||
((curthread->check_pending || THR_NEED_ASYNC_CANCEL(curthread))
|
||||
&& !THR_IN_CRITICAL(curthread)))) {
|
||||
if (THR_NEED_ASYNC_CANCEL(curthread) && !THR_IN_CRITICAL(curthread)) {
|
||||
uc = alloca(sizeof(ucontext_t));
|
||||
resume_once = 0;
|
||||
THR_GETCONTEXT(uc);
|
||||
if (resume_once == 0) {
|
||||
resume_once = 1;
|
||||
curthread->check_pending = 0;
|
||||
thr_resume_check(curthread, uc, &psf);
|
||||
thr_resume_check(curthread, uc);
|
||||
}
|
||||
}
|
||||
THR_ACTIVATE_LAST_LOCK(curthread);
|
||||
@ -888,9 +872,6 @@ kse_sched_single(struct kse_mailbox *kmbx)
|
||||
}
|
||||
}
|
||||
|
||||
/* Remove the frame reference. */
|
||||
curthread->curframe = NULL;
|
||||
|
||||
if (curthread->lock_switch == 0) {
|
||||
/* Unlock the scheduling queue. */
|
||||
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
|
||||
@ -925,7 +906,6 @@ kse_sched_multi(struct kse_mailbox *kmbx)
|
||||
{
|
||||
struct kse *curkse;
|
||||
struct pthread *curthread, *td_wait;
|
||||
struct pthread_sigframe *curframe;
|
||||
int ret;
|
||||
|
||||
curkse = (struct kse *)kmbx->km_udata;
|
||||
@ -980,6 +960,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
|
||||
* will be cleared.
|
||||
*/
|
||||
curthread->blocked = 1;
|
||||
DBG_MSG("Running thread %p is now blocked in kernel.\n",
|
||||
curthread);
|
||||
}
|
||||
|
||||
/* Check for any unblocked threads in the kernel. */
|
||||
@ -1085,10 +1067,6 @@ kse_sched_multi(struct kse_mailbox *kmbx)
|
||||
/* Mark the thread active. */
|
||||
curthread->active = 1;
|
||||
|
||||
/* Remove the frame reference. */
|
||||
curframe = curthread->curframe;
|
||||
curthread->curframe = NULL;
|
||||
|
||||
/*
|
||||
* The thread's current signal frame will only be NULL if it
|
||||
* is being resumed after being blocked in the kernel. In
|
||||
@ -1096,7 +1074,7 @@ kse_sched_multi(struct kse_mailbox *kmbx)
|
||||
* signals or needs a cancellation check, we need to add a
|
||||
* signal frame to the thread's context.
|
||||
*/
|
||||
if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
|
||||
if (curthread->lock_switch == 0 && curthread->state == PS_RUNNING &&
|
||||
(curthread->check_pending != 0 ||
|
||||
THR_NEED_ASYNC_CANCEL(curthread)) &&
|
||||
!THR_IN_CRITICAL(curthread)) {
|
||||
@ -1136,10 +1114,10 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp)
|
||||
DBG_MSG(">>> sig wrapper\n");
|
||||
if (curthread->lock_switch)
|
||||
PANIC("thr_resume_wrapper, lock_switch != 0\n");
|
||||
thr_resume_check(curthread, ucp, NULL);
|
||||
thr_resume_check(curthread, ucp);
|
||||
errno = err_save;
|
||||
_kse_critical_enter();
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
curthread->tcb->tcb_tmbx.tm_context = *ucp;
|
||||
ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
|
||||
if (ret != 0)
|
||||
@ -1149,10 +1127,9 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp)
|
||||
}
|
||||
|
||||
static void
|
||||
thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
|
||||
struct pthread_sigframe *psf)
|
||||
thr_resume_check(struct pthread *curthread, ucontext_t *ucp)
|
||||
{
|
||||
_thr_sig_rundown(curthread, ucp, psf);
|
||||
_thr_sig_rundown(curthread, ucp);
|
||||
|
||||
if (THR_NEED_ASYNC_CANCEL(curthread))
|
||||
pthread_testcancel();
|
||||
|
@ -85,26 +85,26 @@ static void mutex_rescan_owned (struct pthread *, struct pthread *,
|
||||
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
|
||||
static inline void mutex_queue_remove(pthread_mutex_t, pthread_t);
|
||||
static inline void mutex_queue_enq(pthread_mutex_t, pthread_t);
|
||||
|
||||
static void mutex_lock_backout(void *arg);
|
||||
|
||||
static struct pthread_mutex_attr static_mutex_attr =
|
||||
PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
|
||||
static pthread_mutexattr_t static_mattr = &static_mutex_attr;
|
||||
|
||||
/* Single underscore versions provided for libc internal usage: */
|
||||
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
|
||||
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
|
||||
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
|
||||
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
|
||||
|
||||
/* No difference between libc and application usage of these: */
|
||||
__weak_reference(_pthread_mutex_init, pthread_mutex_init);
|
||||
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
|
||||
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
|
||||
|
||||
|
||||
|
||||
int
|
||||
_pthread_mutex_init(pthread_mutex_t *mutex,
|
||||
__pthread_mutex_init(pthread_mutex_t *mutex,
|
||||
const pthread_mutexattr_t *mutex_attr)
|
||||
{
|
||||
struct pthread_mutex *pmutex;
|
||||
@ -206,6 +206,22 @@ _pthread_mutex_init(pthread_mutex_t *mutex,
|
||||
return (ret);
|
||||
}
|
||||
|
||||
int
|
||||
_pthread_mutex_init(pthread_mutex_t *mutex,
|
||||
const pthread_mutexattr_t *mutex_attr)
|
||||
{
|
||||
struct pthread_mutex_attr mattr, *mattrp;
|
||||
|
||||
if ((mutex_attr == NULL) || (*mutex_attr == NULL))
|
||||
return (__pthread_mutex_init(mutex, &static_mattr));
|
||||
else {
|
||||
mattr = **mutex_attr;
|
||||
mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
|
||||
mattrp = &mattr;
|
||||
return (__pthread_mutex_init(mutex, &mattrp));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
_thr_mutex_reinit(pthread_mutex_t *mutex)
|
||||
{
|
||||
@ -303,6 +319,7 @@ init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
|
||||
static int
|
||||
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
|
||||
{
|
||||
int private;
|
||||
int ret = 0;
|
||||
|
||||
THR_ASSERT((mutex != NULL) && (*mutex != NULL),
|
||||
@ -310,6 +327,7 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
|
||||
|
||||
/* Lock the mutex structure: */
|
||||
THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
|
||||
private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;
|
||||
|
||||
/*
|
||||
* If the mutex was statically allocated, properly
|
||||
@ -417,6 +435,9 @@ mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret == 0 && private)
|
||||
THR_CRITICAL_ENTER(curthread);
|
||||
|
||||
/* Unlock the mutex structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
|
||||
|
||||
@ -468,6 +489,7 @@ static int
|
||||
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
const struct timespec * abstime)
|
||||
{
|
||||
int private;
|
||||
int ret = 0;
|
||||
|
||||
THR_ASSERT((m != NULL) && (*m != NULL),
|
||||
@ -482,6 +504,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
curthread->timeout = 0;
|
||||
curthread->wakeup_time.tv_sec = -1;
|
||||
|
||||
private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
|
||||
|
||||
/*
|
||||
* Enter a loop waiting to become the mutex owner. We need a
|
||||
* loop in case the waiting thread is interrupted by a signal
|
||||
@ -516,6 +540,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
MUTEX_ASSERT_NOT_OWNED(*m);
|
||||
TAILQ_INSERT_TAIL(&curthread->mutexq,
|
||||
(*m), m_qe);
|
||||
if (private)
|
||||
THR_CRITICAL_ENTER(curthread);
|
||||
|
||||
/* Unlock the mutex structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
@ -539,6 +565,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
*/
|
||||
mutex_queue_enq(*m, curthread);
|
||||
curthread->data.mutex = *m;
|
||||
curthread->sigbackout = mutex_lock_backout;
|
||||
/*
|
||||
* This thread is active and is in a critical
|
||||
* region (holding the mutex lock); we should
|
||||
@ -554,12 +581,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.mutex = NULL;
|
||||
if (THR_IN_MUTEXQ(curthread)) {
|
||||
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
|
||||
mutex_queue_remove(*m, curthread);
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
}
|
||||
/*
|
||||
* Only clear these after assuring the
|
||||
* thread is dequeued.
|
||||
*/
|
||||
curthread->data.mutex = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
}
|
||||
break;
|
||||
|
||||
@ -590,6 +622,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
MUTEX_ASSERT_NOT_OWNED(*m);
|
||||
TAILQ_INSERT_TAIL(&curthread->mutexq,
|
||||
(*m), m_qe);
|
||||
if (private)
|
||||
THR_CRITICAL_ENTER(curthread);
|
||||
|
||||
/* Unlock the mutex structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
@ -613,6 +647,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
*/
|
||||
mutex_queue_enq(*m, curthread);
|
||||
curthread->data.mutex = *m;
|
||||
curthread->sigbackout = mutex_lock_backout;
|
||||
|
||||
/*
|
||||
* This thread is active and is in a critical
|
||||
@ -633,12 +668,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.mutex = NULL;
|
||||
if (THR_IN_MUTEXQ(curthread)) {
|
||||
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
|
||||
mutex_queue_remove(*m, curthread);
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
}
|
||||
/*
|
||||
* Only clear these after assuring the
|
||||
* thread is dequeued.
|
||||
*/
|
||||
curthread->data.mutex = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
}
|
||||
break;
|
||||
|
||||
@ -679,6 +719,8 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
MUTEX_ASSERT_NOT_OWNED(*m);
|
||||
TAILQ_INSERT_TAIL(&curthread->mutexq,
|
||||
(*m), m_qe);
|
||||
if (private)
|
||||
THR_CRITICAL_ENTER(curthread);
|
||||
|
||||
/* Unlock the mutex structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
@ -702,6 +744,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
*/
|
||||
mutex_queue_enq(*m, curthread);
|
||||
curthread->data.mutex = *m;
|
||||
curthread->sigbackout = mutex_lock_backout;
|
||||
|
||||
/* Clear any previous error: */
|
||||
curthread->error = 0;
|
||||
@ -722,12 +765,17 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
|
||||
/* Schedule the next thread: */
|
||||
_thr_sched_switch(curthread);
|
||||
|
||||
curthread->data.mutex = NULL;
|
||||
if (THR_IN_MUTEXQ(curthread)) {
|
||||
THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
|
||||
mutex_queue_remove(*m, curthread);
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
}
|
||||
/*
|
||||
* Only clear these after assuring the
|
||||
* thread is dequeued.
|
||||
*/
|
||||
curthread->data.mutex = NULL;
|
||||
curthread->sigbackout = NULL;
|
||||
|
||||
/*
|
||||
* The threads priority may have changed while
|
||||
@ -932,6 +980,13 @@ mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* Don't allow evil recursive mutexes for private use
|
||||
* in libc and libpthread.
|
||||
*/
|
||||
if (m->m_flags & MUTEX_FLAGS_PRIVATE)
|
||||
PANIC("Recurse on a private mutex.");
|
||||
|
||||
switch (m->m_type) {
|
||||
/* case PTHREAD_MUTEX_DEFAULT: */
|
||||
case PTHREAD_MUTEX_ERRORCHECK:
|
||||
@ -1135,8 +1190,13 @@ mutex_unlock_common(pthread_mutex_t *m, int add_reference)
|
||||
/* Increment the reference count: */
|
||||
(*m)->m_refcount++;
|
||||
|
||||
/* Leave the critical region if this is a private mutex. */
|
||||
if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
|
||||
THR_CRITICAL_LEAVE(curthread);
|
||||
|
||||
/* Unlock the mutex structure: */
|
||||
THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
|
||||
|
||||
if (kmbx != NULL)
|
||||
kse_wakeup(kmbx);
|
||||
}
|
||||
@ -1511,9 +1571,10 @@ _mutex_unlock_private(pthread_t pthread)
|
||||
* This is called by the current thread when it wants to back out of a
|
||||
* mutex_lock in order to run a signal handler.
|
||||
*/
|
||||
void
|
||||
_mutex_lock_backout(struct pthread *curthread)
|
||||
static void
|
||||
mutex_lock_backout(void *arg)
|
||||
{
|
||||
struct pthread *curthread = (struct pthread *)arg;
|
||||
struct pthread_mutex *m;
|
||||
|
||||
if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
|
||||
@ -1554,6 +1615,8 @@ _mutex_lock_backout(struct pthread *curthread)
|
||||
THR_LOCK_RELEASE(curthread, &m->m_lock);
|
||||
}
|
||||
}
|
||||
/* No need to call this again. */
|
||||
curthread->sigbackout = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1674,13 +1737,16 @@ mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
|
||||
(pthread->active_priority > curthread->active_priority))
|
||||
curthread->critical_yield = 1;
|
||||
|
||||
THR_SCHED_UNLOCK(curthread, pthread);
|
||||
if (mutex->m_owner == pthread)
|
||||
if (mutex->m_owner == pthread) {
|
||||
/* We're done; a valid owner was found. */
|
||||
if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
|
||||
THR_CRITICAL_ENTER(pthread);
|
||||
THR_SCHED_UNLOCK(curthread, pthread);
|
||||
break;
|
||||
else
|
||||
/* Get the next thread from the waiting queue: */
|
||||
pthread = TAILQ_NEXT(pthread, sqe);
|
||||
}
|
||||
THR_SCHED_UNLOCK(curthread, pthread);
|
||||
/* Get the next thread from the waiting queue: */
|
||||
pthread = TAILQ_NEXT(pthread, sqe);
|
||||
}
|
||||
|
||||
if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
|
||||
|
@ -67,6 +67,7 @@ once_cancel_handler(void *arg)
|
||||
int
|
||||
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
|
||||
{
|
||||
struct pthread *curthread;
|
||||
int wakeup = 0;
|
||||
|
||||
if (once_control->state == ONCE_DONE)
|
||||
@ -81,9 +82,10 @@ _pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
|
||||
if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) {
|
||||
once_control->state = ONCE_IN_PROGRESS;
|
||||
_pthread_mutex_unlock(&once_lock);
|
||||
_pthread_cleanup_push(once_cancel_handler, once_control);
|
||||
curthread = _get_curthread();
|
||||
THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
|
||||
init_routine();
|
||||
_pthread_cleanup_pop(0);
|
||||
THR_CLEANUP_POP(curthread, 0);
|
||||
_pthread_mutex_lock(&once_lock);
|
||||
once_control->state = ONCE_DONE;
|
||||
wakeup = 1;
|
||||
|
@ -416,8 +416,24 @@ struct pthread_cleanup {
|
||||
struct pthread_cleanup *next;
|
||||
void (*routine) ();
|
||||
void *routine_arg;
|
||||
int onstack;
|
||||
};
|
||||
|
||||
#define THR_CLEANUP_PUSH(td, func, arg) { \
|
||||
struct pthread_cleanup __cup; \
|
||||
\
|
||||
__cup.routine = func; \
|
||||
__cup.routine_arg = arg; \
|
||||
__cup.onstack = 1; \
|
||||
__cup.next = (td)->cleanup; \
|
||||
(td)->cleanup = &__cup;
|
||||
|
||||
#define THR_CLEANUP_POP(td, exec) \
|
||||
(td)->cleanup = __cup.next; \
|
||||
if ((exec) != 0) \
|
||||
__cup.routine(__cup.routine_arg); \
|
||||
}
|
||||
|
||||
struct pthread_atfork {
|
||||
TAILQ_ENTRY(pthread_atfork) qe;
|
||||
void (*prepare)(void);
|
||||
@ -573,6 +589,7 @@ struct pthread_sigframe {
|
||||
sigset_t psf_sigset;
|
||||
sigset_t psf_sigmask;
|
||||
int psf_seqno;
|
||||
thread_continuation_t psf_continuation;
|
||||
};
|
||||
|
||||
struct join_status {
|
||||
@ -645,8 +662,8 @@ struct pthread {
|
||||
/*
|
||||
* Used for tracking delivery of signal handlers.
|
||||
*/
|
||||
struct pthread_sigframe *curframe;
|
||||
siginfo_t *siginfo;
|
||||
thread_continuation_t sigbackout;
|
||||
|
||||
/*
|
||||
* Cancelability flags - the lower 2 bits are used by cancel
|
||||
@ -1070,7 +1087,6 @@ SCLASS int _thr_debug_flags SCLASS_PRESET(0);
|
||||
*/
|
||||
__BEGIN_DECLS
|
||||
int _cond_reinit(pthread_cond_t *);
|
||||
void _cond_wait_backout(struct pthread *);
|
||||
struct kse *_kse_alloc(struct pthread *, int sys_scope);
|
||||
kse_critical_t _kse_critical_enter(void);
|
||||
void _kse_critical_leave(kse_critical_t);
|
||||
@ -1085,7 +1101,6 @@ int _kse_setthreaded(int);
|
||||
void _kseg_free(struct kse_group *);
|
||||
int _mutex_cv_lock(pthread_mutex_t *);
|
||||
int _mutex_cv_unlock(pthread_mutex_t *);
|
||||
void _mutex_lock_backout(struct pthread *);
|
||||
void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
|
||||
int _mutex_reinit(struct pthread_mutex *);
|
||||
void _mutex_unlock_private(struct pthread *);
|
||||
@ -1148,8 +1163,7 @@ void _thr_set_timeout(const struct timespec *);
|
||||
void _thr_seterrno(struct pthread *, int);
|
||||
void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
|
||||
void _thr_sig_check_pending(struct pthread *);
|
||||
void _thr_sig_rundown(struct pthread *, ucontext_t *,
|
||||
struct pthread_sigframe *);
|
||||
void _thr_sig_rundown(struct pthread *, ucontext_t *);
|
||||
void _thr_sig_send(struct pthread *pthread, int sig);
|
||||
void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
|
||||
void _thr_spinlock_init(void);
|
||||
|
@ -123,7 +123,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
|
||||
{
|
||||
semid_t semid;
|
||||
|
||||
semid = SEM_USER;
|
||||
semid = (semid_t)SEM_USER;
|
||||
if ((pshared != 0) && (ksem_init(&semid, value) != 0))
|
||||
return (-1);
|
||||
|
||||
@ -145,8 +145,8 @@ _sem_wait(sem_t *sem)
|
||||
if (sem_check_validity(sem) != 0)
|
||||
return (-1);
|
||||
|
||||
curthread = _get_curthread();
|
||||
if ((*sem)->syssem != 0) {
|
||||
curthread = _get_curthread();
|
||||
_thr_cancel_enter(curthread);
|
||||
retval = ksem_wait((*sem)->semid);
|
||||
_thr_cancel_leave(curthread, retval != 0);
|
||||
@ -157,9 +157,9 @@ _sem_wait(sem_t *sem)
|
||||
|
||||
while ((*sem)->count <= 0) {
|
||||
(*sem)->nwaiters++;
|
||||
pthread_cleanup_push(decrease_nwaiters, sem);
|
||||
THR_CLEANUP_PUSH(curthread, decrease_nwaiters, sem);
|
||||
pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
|
||||
pthread_cleanup_pop(0);
|
||||
THR_CLEANUP_POP(curthread, 0);
|
||||
(*sem)->nwaiters--;
|
||||
}
|
||||
(*sem)->count--;
|
||||
|
@ -43,17 +43,15 @@
|
||||
#include "thr_private.h"
|
||||
|
||||
/* Prototypes: */
|
||||
static void build_siginfo(siginfo_t *info, int signo);
|
||||
static inline void build_siginfo(siginfo_t *info, int signo);
|
||||
#ifndef SYSTEM_SCOPE_ONLY
|
||||
static struct pthread *thr_sig_find(struct kse *curkse, int sig,
|
||||
siginfo_t *info);
|
||||
static void handle_special_signals(struct kse *curkse, int sig);
|
||||
#endif
|
||||
static void thr_sigframe_add(struct pthread *thread);
|
||||
static void thr_sigframe_restore(struct pthread *thread,
|
||||
struct pthread_sigframe *psf);
|
||||
static void thr_sigframe_save(struct pthread *thread,
|
||||
struct pthread_sigframe *psf);
|
||||
static inline void thr_sigframe_restore(struct pthread *thread,
|
||||
struct pthread_sigframe *psf);
|
||||
static inline void thr_sigframe_save(struct pthread *thread,
|
||||
struct pthread_sigframe *psf);
|
||||
|
||||
#define SA_KILL 0x01 /* terminates process by default */
|
||||
#define SA_STOP 0x02
|
||||
@ -254,9 +252,6 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
|
||||
|
||||
DBG_MSG(">>> _thr_sig_dispatch(%d)\n", sig);
|
||||
|
||||
/* Some signals need special handling: */
|
||||
handle_special_signals(curkse, sig);
|
||||
|
||||
/* Check if the signal requires a dump of thread information: */
|
||||
if (sig == SIGINFO) {
|
||||
/* Dump thread information to file: */
|
||||
@ -306,11 +301,14 @@ typedef void (*ohandler)(int sig, int code,
|
||||
void
|
||||
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
||||
{
|
||||
struct pthread_sigframe psf;
|
||||
__siginfohandler_t *sigfunc;
|
||||
struct pthread *curthread;
|
||||
struct kse *curkse;
|
||||
struct sigaction act;
|
||||
int sa_flags, err_save, intr_save, timeout_save;
|
||||
int sa_flags, err_save;
|
||||
|
||||
err_save = errno;
|
||||
|
||||
DBG_MSG(">>> _thr_sig_handler(%d)\n", sig);
|
||||
|
||||
@ -319,15 +317,18 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
||||
PANIC("No current thread.\n");
|
||||
if (!(curthread->attr.flags & PTHREAD_SCOPE_SYSTEM))
|
||||
PANIC("Thread is not system scope.\n");
|
||||
if (curthread->flags & THR_FLAGS_EXITING)
|
||||
if (curthread->flags & THR_FLAGS_EXITING) {
|
||||
errno = err_save;
|
||||
return;
|
||||
}
|
||||
|
||||
curkse = _get_curkse();
|
||||
/*
|
||||
* If thread is in critical region or if thread is on
|
||||
* the way of state transition, then latch signal into buffer.
|
||||
*/
|
||||
if (_kse_in_critical() || THR_IN_CRITICAL(curthread) ||
|
||||
(curthread->state != PS_RUNNING && curthread->curframe == NULL)) {
|
||||
curthread->state != PS_RUNNING) {
|
||||
DBG_MSG(">>> _thr_sig_handler(%d) in critical\n", sig);
|
||||
curthread->siginfo[sig-1] = *info;
|
||||
curthread->check_pending = 1;
|
||||
@ -341,18 +342,24 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
||||
*/
|
||||
if (KSE_IS_IDLE(curkse))
|
||||
kse_wakeup(&curkse->k_kcb->kcb_kmbx);
|
||||
errno = err_save;
|
||||
return;
|
||||
}
|
||||
|
||||
/* It is now safe to invoke signal handler */
|
||||
err_save = errno;
|
||||
timeout_save = curthread->timeout;
|
||||
intr_save = curthread->interrupted;
|
||||
/* Check if the signal requires a dump of thread information: */
|
||||
if (sig == SIGINFO) {
|
||||
/* Dump thread information to file: */
|
||||
_thread_dump_info();
|
||||
}
|
||||
|
||||
/* Check the threads previous state: */
|
||||
curthread->critical_count++;
|
||||
if (curthread->sigbackout != NULL)
|
||||
curthread->sigbackout((void *)curthread);
|
||||
curthread->critical_count--;
|
||||
thr_sigframe_save(curthread, &psf);
|
||||
THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared.");
|
||||
|
||||
_kse_critical_enter();
|
||||
/* Get a fresh copy of signal mask */
|
||||
__sys_sigprocmask(SIG_BLOCK, NULL, &curthread->sigmask);
|
||||
@ -395,14 +402,16 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
|
||||
#endif
|
||||
}
|
||||
}
|
||||
errno = err_save;
|
||||
curthread->timeout = timeout_save;
|
||||
curthread->interrupted = intr_save;
|
||||
_kse_critical_enter();
|
||||
curthread->sigmask = ucp->uc_sigmask;
|
||||
SIG_CANTMASK(curthread->sigmask);
|
||||
_kse_critical_leave(&curthread->tcb->tcb_tmbx);
|
||||
|
||||
thr_sigframe_restore(curthread, &psf);
|
||||
|
||||
DBG_MSG("<<< _thr_sig_handler(%d)\n", sig);
|
||||
|
||||
errno = err_save;
|
||||
}
|
||||
|
||||
struct sighandle_info {
|
||||
@ -439,7 +448,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
|
||||
|
||||
if (!_kse_in_critical())
|
||||
PANIC("thr_sig_invoke_handler without in critical\n");
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
/*
|
||||
* Check that a custom handler is installed and if
|
||||
* the signal is not blocked:
|
||||
@ -491,7 +500,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
|
||||
|
||||
_kse_critical_enter();
|
||||
/* Don't trust after critical leave/enter */
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
|
||||
/*
|
||||
* Restore the thread's signal mask.
|
||||
@ -752,7 +761,7 @@ thr_sig_find(struct kse *curkse, int sig, siginfo_t *info)
|
||||
}
|
||||
#endif /* ! SYSTEM_SCOPE_ONLY */
|
||||
|
||||
static void
|
||||
static inline void
|
||||
build_siginfo(siginfo_t *info, int signo)
|
||||
{
|
||||
bzero(info, sizeof(*info));
|
||||
@ -765,54 +774,35 @@ build_siginfo(siginfo_t *info, int signo)
|
||||
* It should only be called from the context of the thread.
|
||||
*/
|
||||
void
|
||||
_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
|
||||
struct pthread_sigframe *psf)
|
||||
_thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp)
|
||||
{
|
||||
int interrupted = curthread->interrupted;
|
||||
int timeout = curthread->timeout;
|
||||
struct pthread_sigframe psf;
|
||||
siginfo_t siginfo;
|
||||
int i;
|
||||
int i, err_save;
|
||||
kse_critical_t crit;
|
||||
struct kse *curkse;
|
||||
sigset_t sigmask;
|
||||
|
||||
err_save = errno;
|
||||
|
||||
DBG_MSG(">>> thr_sig_rundown (%p)\n", curthread);
|
||||
|
||||
/* Check the threads previous state: */
|
||||
if ((psf != NULL) && (psf->psf_valid != 0)) {
|
||||
/*
|
||||
* Do a little cleanup handling for those threads in
|
||||
* queues before calling the signal handler. Signals
|
||||
* for these threads are temporarily blocked until
|
||||
* after cleanup handling.
|
||||
*/
|
||||
switch (psf->psf_state) {
|
||||
case PS_COND_WAIT:
|
||||
_cond_wait_backout(curthread);
|
||||
psf->psf_state = PS_RUNNING;
|
||||
break;
|
||||
|
||||
case PS_MUTEX_WAIT:
|
||||
_mutex_lock_backout(curthread);
|
||||
psf->psf_state = PS_RUNNING;
|
||||
break;
|
||||
|
||||
case PS_RUNNING:
|
||||
break;
|
||||
curthread->critical_count++;
|
||||
if (curthread->sigbackout != NULL)
|
||||
curthread->sigbackout((void *)curthread);
|
||||
curthread->critical_count--;
|
||||
|
||||
default:
|
||||
psf->psf_state = PS_RUNNING;
|
||||
break;
|
||||
}
|
||||
/* XXX see comment in thr_sched_switch_unlocked */
|
||||
curthread->critical_count--;
|
||||
}
|
||||
THR_ASSERT(!(curthread->sigbackout), "sigbackout was not cleared.");
|
||||
THR_ASSERT((curthread->state == PS_RUNNING), "state is not PS_RUNNING");
|
||||
|
||||
thr_sigframe_save(curthread, &psf);
|
||||
/*
|
||||
* Lower the priority before calling the handler in case
|
||||
* it never returns (longjmps back):
|
||||
*/
|
||||
crit = _kse_critical_enter();
|
||||
curkse = _get_curkse();
|
||||
curkse = curthread->kse;
|
||||
KSE_SCHED_LOCK(curkse, curkse->k_kseg);
|
||||
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
|
||||
curthread->active_priority &= ~THR_SIGNAL_PRIORITY;
|
||||
@ -851,9 +841,8 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
|
||||
}
|
||||
}
|
||||
|
||||
if (psf != NULL && psf->psf_valid != 0)
|
||||
thr_sigframe_restore(curthread, psf);
|
||||
curkse = _get_curkse();
|
||||
/* Don't trust after signal handling */
|
||||
curkse = curthread->kse;
|
||||
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
|
||||
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
|
||||
_kse_critical_leave(&curthread->tcb->tcb_tmbx);
|
||||
@ -875,10 +864,10 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
|
||||
}
|
||||
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
|
||||
}
|
||||
curthread->interrupted = interrupted;
|
||||
curthread->timeout = timeout;
|
||||
|
||||
DBG_MSG("<<< thr_sig_rundown (%p)\n", curthread);
|
||||
|
||||
thr_sigframe_restore(curthread, &psf);
|
||||
errno = err_save;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -897,7 +886,15 @@ _thr_sig_check_pending(struct pthread *curthread)
|
||||
volatile int once;
|
||||
int errsave;
|
||||
|
||||
if (THR_IN_CRITICAL(curthread))
|
||||
/*
|
||||
* If the thread is in critical region, delay processing signals.
|
||||
* If the thread state is not PS_RUNNING, it might be switching
|
||||
* into UTS and but a THR_LOCK_RELEASE saw check_pending, and it
|
||||
* goes here, in the case we delay processing signals, lets UTS
|
||||
* process complicated things, normally UTS will call _thr_sig_add
|
||||
* to resume the thread, so we needn't repeat doing it here.
|
||||
*/
|
||||
if (THR_IN_CRITICAL(curthread) || curthread->state != PS_RUNNING)
|
||||
return;
|
||||
|
||||
errsave = errno;
|
||||
@ -906,42 +903,11 @@ _thr_sig_check_pending(struct pthread *curthread)
|
||||
if (once == 0) {
|
||||
once = 1;
|
||||
curthread->check_pending = 0;
|
||||
_thr_sig_rundown(curthread, &uc, NULL);
|
||||
_thr_sig_rundown(curthread, &uc);
|
||||
}
|
||||
errno = errsave;
|
||||
}
|
||||
|
||||
#ifndef SYSTEM_SCOPE_ONLY
|
||||
/*
|
||||
* This must be called with upcalls disabled.
|
||||
*/
|
||||
static void
|
||||
handle_special_signals(struct kse *curkse, int sig)
|
||||
{
|
||||
switch (sig) {
|
||||
/*
|
||||
* POSIX says that pending SIGCONT signals are
|
||||
* discarded when one of these signals occurs.
|
||||
*/
|
||||
case SIGTSTP:
|
||||
case SIGTTIN:
|
||||
case SIGTTOU:
|
||||
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
|
||||
SIGDELSET(_thr_proc_sigpending, SIGCONT);
|
||||
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
|
||||
break;
|
||||
case SIGCONT:
|
||||
KSE_LOCK_ACQUIRE(curkse, &_thread_signal_lock);
|
||||
SIGDELSET(_thr_proc_sigpending, SIGTSTP);
|
||||
SIGDELSET(_thr_proc_sigpending, SIGTTIN);
|
||||
SIGDELSET(_thr_proc_sigpending, SIGTTOU);
|
||||
KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif /* ! SYSTEM_SCOPE_ONLY */

/*
 * Perform thread specific actions in response to a signal.
 * This function is only called if there is a handler installed
@ -979,11 +945,9 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
		return (NULL);
	}

	if (pthread->curframe == NULL ||
	    (pthread->state != PS_SIGWAIT &&
	    SIGISMEMBER(pthread->sigmask, sig)) ||
	    THR_IN_CRITICAL(pthread)) {
	/* Thread is running, or the signal was masked: */
	if (pthread->state != PS_SIGWAIT &&
	    SIGISMEMBER(pthread->sigmask, sig)) {
		/* Signal is masked; just add it to the thread. */
		if (!fromproc) {
			SIGADDSET(pthread->sigpend, sig);
			if (info == NULL)
@ -996,19 +960,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
				return (NULL);
			SIGADDSET(pthread->sigpend, sig);
		}
		if (!SIGISMEMBER(pthread->sigmask, sig)) {
			/* A quick path to exit the process: */
			if (sigfunc == SIG_DFL && sigprop(sig) & SA_KILL) {
				kse_thr_interrupt(NULL, KSE_INTR_SIGEXIT, sig);
				/* Never reached. */
			}
			pthread->check_pending = 1;
			if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
			    (pthread->blocked != 0) &&
			    !THR_IN_CRITICAL(pthread))
				kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
				    restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
		}
	}
	else {
		/* If the signal is not pending at process level, just return. */
@ -1049,7 +1000,6 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
			/* Possibly not in the RUNQ while having a curframe? */
			pthread->active_priority |= THR_SIGNAL_PRIORITY;
		}
		suppress_handler = 1;
		break;
	/*
	 * States which cannot be interrupted but still require the
@ -1115,19 +1065,22 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
			build_siginfo(&pthread->siginfo[sig-1], sig);
		else if (info != &pthread->siginfo[sig-1])
			memcpy(&pthread->siginfo[sig-1], info, sizeof(*info));

		pthread->check_pending = 1;
		if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
		    (pthread->blocked != 0) && !THR_IN_CRITICAL(pthread))
			kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
			    restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
		if (suppress_handler == 0) {
			/*
			 * Set up a signal frame and save the current
			 * thread's state:
			 */
			thr_sigframe_add(pthread);
			if (pthread->flags & THR_FLAGS_IN_RUNQ)
				THR_RUNQ_REMOVE(pthread);
			pthread->active_priority |= THR_SIGNAL_PRIORITY;
			kmbx = _thr_setrunnable_unlocked(pthread);
		} else {
			pthread->check_pending = 1;
			if (pthread->state != PS_RUNNING) {
				if (pthread->flags & THR_FLAGS_IN_RUNQ)
					THR_RUNQ_REMOVE(pthread);
				pthread->active_priority |= THR_SIGNAL_PRIORITY;
				kmbx = _thr_setrunnable_unlocked(pthread);
			}
		}
	}
	return (kmbx);

@ -1151,6 +1104,10 @@ _thr_sig_send(struct pthread *pthread, int sig)
	THR_SCHED_LOCK(curthread, pthread);
	if (_thread_sigact[sig - 1].sa_handler != SIG_IGN) {
		kmbx = _thr_sig_add(pthread, sig, NULL);
		/* Add a preemption point. */
		if (kmbx == NULL && (curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;
		THR_SCHED_UNLOCK(curthread, pthread);
		if (kmbx != NULL)
			kse_wakeup(kmbx);
@ -1161,52 +1118,55 @@ _thr_sig_send(struct pthread *pthread, int sig)
		 */
		if (pthread == curthread && curthread->check_pending)
			_thr_sig_check_pending(curthread);

	} else {
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
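_thr_sig_send is the engine behind pthread_kill(): the disposition is process-wide, delivery is per-thread. A self-contained usage example of that contract:

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void
on_usr1(int sig)
{
	(void)sig;
	/* Only async-signal-safe calls belong in a handler. */
	write(STDOUT_FILENO, "worker got SIGUSR1\n", 19);
}

static void *
worker(void *arg)
{
	(void)arg;
	for (;;)
		pause();	/* wait for a signal */
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	struct sigaction sa;

	sa.sa_handler = on_usr1;	/* process-wide disposition */
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);			/* crude: let the worker block */
	pthread_kill(t, SIGUSR1);	/* delivery targets one thread */
	sleep(1);
	return (0);
}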

static void
thr_sigframe_add(struct pthread *thread)
static inline void
thr_sigframe_restore(struct pthread *curthread, struct pthread_sigframe *psf)
{
	if (thread->curframe == NULL)
		PANIC("Thread doesn't have signal frame ");
	kse_critical_t crit;
	struct kse *curkse;

	if (thread->curframe->psf_valid == 0) {
		thread->curframe->psf_valid = 1;
		/*
		 * Multiple signals can be added to the same signal
		 * frame.  Only save the thread's state the first time.
		 */
		thr_sigframe_save(thread, thread->curframe);
	}
	THR_THREAD_LOCK(curthread, curthread);
	curthread->cancelflags = psf->psf_cancelflags;
	crit = _kse_critical_enter();
	curkse = curthread->kse;
	KSE_SCHED_LOCK(curkse, curthread->kseg);
	curthread->flags = psf->psf_flags;
	curthread->interrupted = psf->psf_interrupted;
	curthread->timeout = psf->psf_timeout;
	curthread->data = psf->psf_wait_data;
	curthread->wakeup_time = psf->psf_wakeup_time;
	curthread->continuation = psf->psf_continuation;
	KSE_SCHED_UNLOCK(curkse, curthread->kseg);
	_kse_critical_leave(crit);
	THR_THREAD_UNLOCK(curthread, curthread);
}

static void
thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf)
static inline void
thr_sigframe_save(struct pthread *curthread, struct pthread_sigframe *psf)
{
	if (psf->psf_valid == 0)
		PANIC("invalid pthread_sigframe\n");
	thread->flags = psf->psf_flags;
	thread->cancelflags = psf->psf_cancelflags;
	thread->interrupted = psf->psf_interrupted;
	thread->timeout = psf->psf_timeout;
	thread->state = psf->psf_state;
	thread->data = psf->psf_wait_data;
	thread->wakeup_time = psf->psf_wakeup_time;
}
	kse_critical_t crit;
	struct kse *curkse;

static void
thr_sigframe_save(struct pthread *thread, struct pthread_sigframe *psf)
{
	THR_THREAD_LOCK(curthread, curthread);
	psf->psf_cancelflags = curthread->cancelflags;
	crit = _kse_critical_enter();
	curkse = curthread->kse;
	KSE_SCHED_LOCK(curkse, curthread->kseg);
	/* This has to initialize all members of the sigframe. */
	psf->psf_flags = thread->flags & THR_FLAGS_PRIVATE;
	psf->psf_cancelflags = thread->cancelflags;
	psf->psf_interrupted = thread->interrupted;
	psf->psf_timeout = thread->timeout;
	psf->psf_state = thread->state;
	psf->psf_wait_data = thread->data;
	psf->psf_wakeup_time = thread->wakeup_time;
	psf->psf_flags = (curthread->flags & (THR_FLAGS_PRIVATE | THR_FLAGS_EXITING));
	psf->psf_interrupted = curthread->interrupted;
	psf->psf_timeout = curthread->timeout;
	psf->psf_wait_data = curthread->data;
	psf->psf_wakeup_time = curthread->wakeup_time;
	psf->psf_continuation = curthread->continuation;
	KSE_SCHED_UNLOCK(curkse, curthread->kseg);
	_kse_critical_leave(crit);
	THR_THREAD_UNLOCK(curthread, curthread);
}
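Both halves of the pair copy the same fixed field set, under the same locks, so a resumed wait continues exactly where the signal found it. The underlying pattern, reduced to a hedged sketch with invented field names:

/* Illustrative only: field names are invented, not libpthread's. */
struct wait_state {
	int	flags;
	int	interrupted;
	int	timeout;
	void	*wait_data;
};

struct frame {
	struct wait_state saved;	/* lives in the signal frame */
};

static void
frame_save(struct frame *f, const struct wait_state *w)
{
	f->saved = *w;	/* must capture every member the handler may touch */
}

static void
frame_restore(struct frame *f, struct wait_state *w)
{
	*w = f->saved;	/* put the thread back where the signal found it */
}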

void
@ -1266,6 +1226,9 @@ _thr_signal_deinit(void)
	int i;
	struct pthread *curthread = _get_curthread();

	/* Clear process pending signals. */
	sigemptyset(&_thr_proc_sigpending);

	/* Enter a loop to get the existing signal status: */
	for (i = 1; i <= _SIG_MAXSIG; i++) {
		/* Check for signals which cannot be trapped: */

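Walking signal numbers to inspect dispositions, as the loop above begins to do, uses sigaction() with a NULL new action so nothing is changed. A portable sketch:

#include <signal.h>
#include <stdio.h>

/* List signals that currently have a user handler installed. */
void
dump_handlers(void)
{
	struct sigaction osa;
	int sig;

	for (sig = 1; sig < NSIG; sig++) {
		if (sigaction(sig, NULL, &osa) != 0)
			continue;	/* not a valid signal number here */
		if (osa.sa_handler != SIG_DFL && osa.sa_handler != SIG_IGN)
			printf("signal %d: handler installed\n", sig);
	}
}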
@ -69,6 +69,7 @@ _sigsuspend(const sigset_t *set)
		/* Wait for a signal: */
		_thr_sched_switch_unlocked(curthread);
	} else {
		curthread->check_pending = 1;
		THR_UNLOCK_SWITCH(curthread);
		/* Check for pending signals that we can handle: */
		_thr_sig_check_pending(curthread);

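The function being patched implements sigsuspend(), whose whole point is that swapping the mask and sleeping happen atomically, closing the check-then-wait race. The canonical usage pattern:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

int
main(void)
{
	sigset_t block, wait_mask;
	struct sigaction sa;

	sa.sa_handler = on_usr1;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 so it cannot fire between the test and the wait. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);
	sigdelset(&wait_mask, SIGUSR1);

	while (!got_usr1)
		sigsuspend(&wait_mask);	/* atomically unblock and sleep */

	printf("woke on SIGUSR1\n");
	return (0);
}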

@ -49,6 +49,10 @@ struct spinlock_extra {

static void init_spinlock(spinlock_t *lck);

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

static pthread_mutex_t spinlock_static_lock;
static struct spinlock_extra extra[MAX_SPINLOCKS];
static int spinlock_count = 0;
@ -65,7 +69,7 @@ _spinunlock(spinlock_t *lck)
	struct spinlock_extra *extra;

	extra = (struct spinlock_extra *)lck->fname;
	pthread_mutex_unlock(&extra->lock);
	_pthread_mutex_unlock(&extra->lock);
}

/*
@ -90,7 +94,7 @@ _spinlock(spinlock_t *lck)
	if (lck->fname == NULL)
		init_spinlock(lck);
	extra = (struct spinlock_extra *)lck->fname;
	pthread_mutex_lock(&extra->lock);
	_pthread_mutex_lock(&extra->lock);
}

/*
@ -112,13 +116,13 @@ _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
static void
init_spinlock(spinlock_t *lck)
{
	pthread_mutex_lock(&spinlock_static_lock);
	_pthread_mutex_lock(&spinlock_static_lock);
	if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
		lck->fname = (char *)&extra[spinlock_count];
		extra[spinlock_count].owner = lck;
		spinlock_count++;
	}
	pthread_mutex_unlock(&spinlock_static_lock);
	_pthread_mutex_unlock(&spinlock_static_lock);
	if (lck->fname == NULL)
		PANIC("Exceeded max spinlocks");
}
@ -133,10 +137,10 @@ _thr_spinlock_init(void)
		for (i = 0; i < spinlock_count; i++)
			_thr_mutex_reinit(&extra[i].lock);
	} else {
		if (pthread_mutex_init(&spinlock_static_lock, NULL))
		if (_pthread_mutex_init(&spinlock_static_lock, &static_mattr))
			PANIC("Cannot initialize spinlock_static_lock");
		for (i = 0; i < MAX_SPINLOCKS; i++) {
			if (pthread_mutex_init(&extra[i].lock, NULL))
			if (_pthread_mutex_init(&extra[i].lock, &static_mattr))
				PANIC("Cannot initialize spinlock extra");
		}
		initialized = 1;
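The switch to the underscore-prefixed _pthread_mutex_* names, plus the static private-mutex attribute, keeps libc's internal locks out of the application's reach and lets the library defer signals while they are held. The strong/weak naming idea in miniature (GCC alias syntax; the names here are invented, and libpthread itself spells this with its __weak_reference() macro):

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* The library implements the underscore name and calls it internally. */
int
_my_lock_acquire(void)
{
	return (pthread_mutex_lock(&demo_lock));
}

/*
 * The public name is only a weak alias; an application that interposes
 * my_lock_acquire() cannot capture the library's internal calls.
 */
extern int my_lock_acquire(void)
    __attribute__((__weak__, __alias__("_my_lock_acquire")));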