1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-10-19 02:29:40 +00:00

Add code to support thread debugging.

1. Add global variable _libkse_debug; the debugger uses this variable to
   identify libpthread. When the variable is set to non-zero by the debugger,
   libpthread will take special action at context switch time: it will check
   the TMDF_DONOTRUNUSER flag, and if a thread has the flag set by the debugger,
   it won't be scheduled. When a thread leaves a KSE critical region, the thread
   checks the flag, and if it is set, the thread relinquishes the CPU.

2. Add _pq_first_debug to select a thread allowed to run by the debugger.

3. Some names prefixed with _thr are renamed to use the _thread prefix.

which is allowed to run by debugger.
This commit is contained in:
David Xu 2004-07-13 22:49:58 +00:00
parent 8908521788
commit 566382df0a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=132120
10 changed files with 292 additions and 88 deletions

View File

@ -125,11 +125,11 @@ _pthread_exit(void *status)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
_thr_active_threads--;
_thread_active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (_thr_active_threads == 0) {
if (_thread_active_threads == 0) {
#else
if (_thr_active_threads == 1) {
if (_thread_active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);

View File

@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/kse.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/queue.h>
#include <machine/atomic.h>
@ -98,7 +99,10 @@ __FBSDID("$FreeBSD$");
_pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
#define KSE_RUNQ_REMOVE(kse, thrd) \
_pq_remove(&(kse)->k_schedq->sq_runq, thrd)
#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
#define KSE_RUNQ_FIRST(kse) \
((_libkse_debug == 0) ? \
_pq_first(&(kse)->k_schedq->sq_runq) : \
_pq_first_debug(&(kse)->k_schedq->sq_runq))
#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads)
@ -222,7 +226,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_thr_active_threads = 1;
_thread_active_threads = 1;
/*
* Enter a loop to remove and free all threads other than
@ -355,7 +359,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_thr_active_threads = 1;
_thread_active_threads = 1;
#endif
}
@ -435,6 +439,9 @@ _kse_setthreaded(int threaded)
PANIC("kse_create() failed\n");
return (-1);
}
_thr_initial->tcb->tcb_tmbx.tm_lwp =
_kse_initial->k_kcb->kcb_kmbx.km_lwp;
_thread_activated = 1;
#ifndef SYSTEM_SCOPE_ONLY
/* Set current thread to initial thread */
@ -630,6 +637,19 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
kse_sched_single(&curkse->k_kcb->kcb_kmbx);
else {
if (__predict_false(_libkse_debug != 0)) {
/*
* Because debugger saves single step status in thread
* mailbox's tm_dflags, we can safely clear single
* step status here. the single step status will be
* restored by kse_switchin when the thread is
* switched in again. This also lets uts run in full
* speed.
*/
ptrace(PT_CLEARSTEP, curkse->k_kcb->kcb_kmbx.km_lwp,
(caddr_t) 1, 0);
}
KSE_SET_SWITCH(curkse);
_thread_enter_uts(curthread->tcb, curkse->k_kcb);
}
@ -697,7 +717,7 @@ kse_sched_single(struct kse_mailbox *kmbx)
curkse->k_flags |= KF_INITIALIZED;
first = 1;
curthread->active = 1;
/* Setup kernel signal masks for new thread. */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
/*
@ -972,7 +992,7 @@ kse_sched_multi(struct kse_mailbox *kmbx)
*/
if (curthread == NULL)
; /* Nothing to do here. */
else if ((curthread->need_switchout == 0) &&
else if ((curthread->need_switchout == 0) && DBG_CAN_RUN(curthread) &&
(curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) {
/*
* Resume the thread and tell it to yield when
@ -992,8 +1012,10 @@ kse_sched_multi(struct kse_mailbox *kmbx)
if (ret != 0)
PANIC("Can't resume thread in critical region\n");
}
else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
curthread->tcb->tcb_tmbx.tm_lwp = 0;
kse_switchout_thread(curkse, curthread);
}
curkse->k_curthread = NULL;
#ifdef DEBUG_THREAD_KERN
@ -2447,7 +2469,7 @@ thr_link(struct pthread *thread)
*/
thread->uniqueid = next_uniqueid++;
THR_LIST_ADD(thread);
_thr_active_threads++;
_thread_active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2465,7 +2487,7 @@ thr_unlink(struct pthread *thread)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
_thr_active_threads--;
_thread_active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2499,3 +2521,27 @@ _thr_hash_find(struct pthread *thread)
return (NULL);
}
void
_thr_debug_check_yield(struct pthread *curthread)
{
/*
* Note that TMDF_DONOTRUNUSER is set after process is suspended.
* When we are being debugged, every suspension in process
* will cause all KSEs to schedule an upcall in kernel, unless the
* KSE is in critical region.
* If the function is being called, it means the KSE is no longer
* in critical region, if the TMDF_DONOTRUNUSER is set by debugger
* before KSE leaves critical region, we will catch it here, else
* if the flag is changed during testing, it also not a problem,
* because the change only occurs after a process suspension event
* occurs. A suspension event will always cause KSE to schedule an
* upcall, in the case, because we are not in critical region,
* upcall will be scheduled sucessfully, the flag will be checked
* again in kse_sched_multi, we won't back until the flag
* is cleared by debugger, the flag will be cleared in next
* suspension event.
*/
if ((curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0 &&
!DBG_CAN_RUN(curthread))
_thr_sched_switch(curthread);
}

View File

@ -242,6 +242,57 @@ _pq_first(pq_queue_t *pq)
return (pthread);
}
/*
* Select a thread which is allowed to run by debugger, we probably
* should merge the function into _pq_first if that function is only
* used by scheduler to select a thread.
*/
/*
 * Select the highest-priority thread that the debugger allows to run
 * (i.e. whose TMDF_DONOTRUNUSER flag is clear).  We probably should
 * merge this function into _pq_first if that function is only used by
 * the scheduler to select a thread.
 */
pthread_t
_pq_first_debug(pq_queue_t *pq)
{
	pq_list_t *pql, *pqlnext = NULL;
	pthread_t pthread = NULL;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
	PQ_SET_ACTIVE(pq);

	/* Walk the priority lists until a runnable thread is found. */
	for (pql = TAILQ_FIRST(&pq->pq_queue);
	     pql != NULL && pthread == NULL; pql = pqlnext) {
		if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
			/*
			 * The priority list is empty; remove the list
			 * from the queue.
			 */
			pqlnext = TAILQ_NEXT(pql, pl_link);
			TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);

			/* Mark the list as not being in the queue: */
			pql->pl_queued = 0;
		} else {
			/*
			 * Note there may be a suspension event during this
			 * test; if TMDF_DONOTRUNUSER is set after we tested
			 * it, we will run the thread.  This may seem to be a
			 * problem, but fortunately, when we are being
			 * debugged, all context switches are done by
			 * kse_switchin, which is a syscall; kse_switchin
			 * will check the flag again, and the thread will be
			 * returned via an upcall, so next time the UTS won't
			 * run the thread.
			 */
			while (pthread != NULL && !DBG_CAN_RUN(pthread)) {
				pthread = TAILQ_NEXT(pthread, pqe);
			}
			/* This priority list had no runnable thread; try
			   the next one. */
			if (pthread == NULL)
				pqlnext = TAILQ_NEXT(pql, pl_link);
		}
	}

	PQ_CLEAR_ACTIVE(pq);
	return (pthread);
}
static void
pq_insert_prio_list(pq_queue_t *pq, int prio)

View File

@ -430,6 +430,7 @@ struct pthread_attr {
int prio;
int suspend;
#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
#define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */
int flags;
void *arg_attr;
void (*cleanup_attr) ();
@ -582,15 +583,19 @@ struct pthread_specific_elem {
int seqno;
};
struct pthread_key {
volatile int allocated;
volatile int count;
int seqno;
void (*destructor) (void *);
};
#define MAX_THR_LOCKLEVEL 5
/*
* Thread structure.
*/
struct pthread {
/*
* Thread mailbox is first so it can be aligned properly.
*/
/* Thread control block */
struct tcb *tcb;
/*
@ -816,12 +821,14 @@ struct pthread {
#define THR_YIELD_CHECK(thrd) \
do { \
if (((thrd)->critical_yield != 0) && \
!(THR_IN_CRITICAL(thrd))) \
_thr_sched_switch(thrd); \
else if (((thrd)->check_pending != 0) && \
!(THR_IN_CRITICAL(thrd))) \
_thr_sig_check_pending(thrd); \
if (!THR_IN_CRITICAL(thrd)) { \
if (__predict_false(_libkse_debug)) \
_thr_debug_check_yield(thrd); \
if ((thrd)->critical_yield != 0) \
_thr_sched_switch(thrd); \
if ((thrd)->check_pending != 0) \
_thr_sig_check_pending(thrd); \
} \
} while (0)
#define THR_LOCK_ACQUIRE(thrd, lck) \
@ -882,8 +889,6 @@ do { \
_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_REMOVE(thrd) \
_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_FIRST(thrd) \
_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
/*
* Macros to insert/remove threads to the all thread list and
@ -964,6 +969,8 @@ do { \
(((thrd)->state == PS_SUSPENDED) || \
(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)
#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
TMDF_DONOTRUNUSER) == 0)
extern int __isthreaded;
@ -980,6 +987,9 @@ _kse_isthreaded(void)
SCLASS void *_usrstack SCLASS_PRESET(NULL);
SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
/* For debugger */
SCLASS int _libkse_debug SCLASS_PRESET(0);
SCLASS int _thread_activated SCLASS_PRESET(0);
/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread) _thread_list
@ -989,7 +999,7 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
SCLASS int _thr_active_threads SCLASS_PRESET(1);
SCLASS int _thread_active_threads SCLASS_PRESET(1);
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t _thr_atfork_mutex;
@ -1079,6 +1089,7 @@ void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
struct pthread *_pq_first_debug(struct pq_queue *pq);
void *_pthread_getspecific(pthread_key_t);
int _pthread_key_create(pthread_key_t *, void (*) (void *));
int _pthread_key_delete(pthread_key_t);
@ -1150,6 +1161,7 @@ void _thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void _thr_finish_cancellation(void *arg);
int _thr_sigonstack(void *sp);
void _thr_debug_check_yield(struct pthread *);
/*
* Aliases for _pthread functions. Should be called instead of

View File

@ -38,15 +38,8 @@
#include <pthread.h>
#include "thr_private.h"
struct pthread_key {
volatile int allocated;
volatile int count;
int seqno;
void (*destructor) ();
};
/* Static variables: */
static struct pthread_key key_table[PTHREAD_KEYS_MAX];
struct pthread_key _thread_keytable[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@ -64,10 +57,10 @@ _pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
if (key_table[i].allocated == 0) {
key_table[i].allocated = 1;
key_table[i].destructor = destructor;
key_table[i].seqno++;
if (_thread_keytable[i].allocated == 0) {
_thread_keytable[i].allocated = 1;
_thread_keytable[i].destructor = destructor;
_thread_keytable[i].seqno++;
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
@ -91,8 +84,8 @@ _pthread_key_delete(pthread_key_t key)
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
if (key_table[key].allocated)
key_table[key].allocated = 0;
if (_thread_keytable[key].allocated)
_thread_keytable[key].allocated = 0;
else
ret = EINVAL;
@ -123,13 +116,13 @@ _thread_cleanupspecific(void)
(curthread->specific_data_count > 0); key++) {
destructor = NULL;
if (key_table[key].allocated &&
if (_thread_keytable[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
key_table[key].seqno) {
_thread_keytable[key].seqno) {
data = (void *)
curthread->specific[key].data;
destructor = key_table[key].destructor;
destructor = _thread_keytable[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
@ -185,7 +178,7 @@ _pthread_setspecific(pthread_key_t key, const void *value)
if ((pthread->specific) ||
(pthread->specific = pthread_key_allocate_data())) {
if ((unsigned int)key < PTHREAD_KEYS_MAX) {
if (key_table[key].allocated) {
if (_thread_keytable[key].allocated) {
if (pthread->specific[key].data == NULL) {
if (value != NULL)
pthread->specific_data_count++;
@ -193,7 +186,7 @@ _pthread_setspecific(pthread_key_t key, const void *value)
pthread->specific_data_count--;
pthread->specific[key].data = value;
pthread->specific[key].seqno =
key_table[key].seqno;
_thread_keytable[key].seqno;
ret = 0;
} else
ret = EINVAL;
@ -216,8 +209,8 @@ _pthread_getspecific(pthread_key_t key)
/* Check if there is specific data: */
if (pthread->specific != NULL && (unsigned int)key < PTHREAD_KEYS_MAX) {
/* Check if this key has been used before: */
if (key_table[key].allocated &&
(pthread->specific[key].seqno == key_table[key].seqno)) {
if (_thread_keytable[key].allocated &&
(pthread->specific[key].seqno == _thread_keytable[key].seqno)) {
/* Return the value: */
data = (void *) pthread->specific[key].data;
} else {

View File

@ -125,11 +125,11 @@ _pthread_exit(void *status)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
_thr_active_threads--;
_thread_active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (_thr_active_threads == 0) {
if (_thread_active_threads == 0) {
#else
if (_thr_active_threads == 1) {
if (_thread_active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);

View File

@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/kse.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/queue.h>
#include <machine/atomic.h>
@ -98,7 +99,10 @@ __FBSDID("$FreeBSD$");
_pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
#define KSE_RUNQ_REMOVE(kse, thrd) \
_pq_remove(&(kse)->k_schedq->sq_runq, thrd)
#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
#define KSE_RUNQ_FIRST(kse) \
((_libkse_debug == 0) ? \
_pq_first(&(kse)->k_schedq->sq_runq) : \
_pq_first_debug(&(kse)->k_schedq->sq_runq))
#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads)
@ -222,7 +226,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_thr_active_threads = 1;
_thread_active_threads = 1;
/*
* Enter a loop to remove and free all threads other than
@ -355,7 +359,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
_thr_active_threads = 1;
_thread_active_threads = 1;
#endif
}
@ -435,6 +439,9 @@ _kse_setthreaded(int threaded)
PANIC("kse_create() failed\n");
return (-1);
}
_thr_initial->tcb->tcb_tmbx.tm_lwp =
_kse_initial->k_kcb->kcb_kmbx.km_lwp;
_thread_activated = 1;
#ifndef SYSTEM_SCOPE_ONLY
/* Set current thread to initial thread */
@ -630,6 +637,19 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
kse_sched_single(&curkse->k_kcb->kcb_kmbx);
else {
if (__predict_false(_libkse_debug != 0)) {
/*
* Because debugger saves single step status in thread
* mailbox's tm_dflags, we can safely clear single
* step status here. the single step status will be
* restored by kse_switchin when the thread is
* switched in again. This also lets uts run in full
* speed.
*/
ptrace(PT_CLEARSTEP, curkse->k_kcb->kcb_kmbx.km_lwp,
(caddr_t) 1, 0);
}
KSE_SET_SWITCH(curkse);
_thread_enter_uts(curthread->tcb, curkse->k_kcb);
}
@ -697,7 +717,7 @@ kse_sched_single(struct kse_mailbox *kmbx)
curkse->k_flags |= KF_INITIALIZED;
first = 1;
curthread->active = 1;
/* Setup kernel signal masks for new thread. */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
/*
@ -972,7 +992,7 @@ kse_sched_multi(struct kse_mailbox *kmbx)
*/
if (curthread == NULL)
; /* Nothing to do here. */
else if ((curthread->need_switchout == 0) &&
else if ((curthread->need_switchout == 0) && DBG_CAN_RUN(curthread) &&
(curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) {
/*
* Resume the thread and tell it to yield when
@ -992,8 +1012,10 @@ kse_sched_multi(struct kse_mailbox *kmbx)
if (ret != 0)
PANIC("Can't resume thread in critical region\n");
}
else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
curthread->tcb->tcb_tmbx.tm_lwp = 0;
kse_switchout_thread(curkse, curthread);
}
curkse->k_curthread = NULL;
#ifdef DEBUG_THREAD_KERN
@ -2447,7 +2469,7 @@ thr_link(struct pthread *thread)
*/
thread->uniqueid = next_uniqueid++;
THR_LIST_ADD(thread);
_thr_active_threads++;
_thread_active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2465,7 +2487,7 @@ thr_unlink(struct pthread *thread)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
_thr_active_threads--;
_thread_active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2499,3 +2521,27 @@ _thr_hash_find(struct pthread *thread)
return (NULL);
}
/*
 * Yield the current thread if the debugger has marked it as not
 * runnable (TMDF_DONOTRUNUSER set in its thread mailbox).
 */
void
_thr_debug_check_yield(struct pthread *curthread)
{
	/*
	 * Note that TMDF_DONOTRUNUSER is set after the process is suspended.
	 * When we are being debugged, every suspension of the process
	 * causes all KSEs to schedule an upcall in the kernel, unless the
	 * KSE is in a critical region.
	 * If this function is being called, the KSE is no longer in a
	 * critical region.  If TMDF_DONOTRUNUSER was set by the debugger
	 * before the KSE left the critical region, we catch it here.  If
	 * the flag changes while we are testing it, that is also not a
	 * problem, because the change only occurs after a process
	 * suspension event.  A suspension event always causes the KSE to
	 * schedule an upcall; in that case, because we are not in a
	 * critical region, the upcall is scheduled successfully and the
	 * flag is checked again in kse_sched_multi.  We won't come back
	 * until the flag is cleared by the debugger, which happens at the
	 * next suspension event.
	 */
	/* System-scope threads are scheduled by the kernel, not the UTS. */
	if ((curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) == 0 &&
	    !DBG_CAN_RUN(curthread))
		_thr_sched_switch(curthread);
}

View File

@ -242,6 +242,57 @@ _pq_first(pq_queue_t *pq)
return (pthread);
}
/*
* Select a thread which is allowed to run by debugger, we probably
* should merge the function into _pq_first if that function is only
* used by scheduler to select a thread.
*/
/*
 * Select the highest-priority thread that the debugger allows to run
 * (i.e. whose TMDF_DONOTRUNUSER flag is clear).  We probably should
 * merge this function into _pq_first if that function is only used by
 * the scheduler to select a thread.
 */
pthread_t
_pq_first_debug(pq_queue_t *pq)
{
	pq_list_t *pql, *pqlnext = NULL;
	pthread_t pthread = NULL;

	/*
	 * Make some assertions when debugging is enabled:
	 */
	PQ_ASSERT_INACTIVE(pq, "_pq_first: pq_active");
	PQ_SET_ACTIVE(pq);

	/* Walk the priority lists until a runnable thread is found. */
	for (pql = TAILQ_FIRST(&pq->pq_queue);
	     pql != NULL && pthread == NULL; pql = pqlnext) {
		if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) {
			/*
			 * The priority list is empty; remove the list
			 * from the queue.
			 */
			pqlnext = TAILQ_NEXT(pql, pl_link);
			TAILQ_REMOVE(&pq->pq_queue, pql, pl_link);

			/* Mark the list as not being in the queue: */
			pql->pl_queued = 0;
		} else {
			/*
			 * Note there may be a suspension event during this
			 * test; if TMDF_DONOTRUNUSER is set after we tested
			 * it, we will run the thread.  This may seem to be a
			 * problem, but fortunately, when we are being
			 * debugged, all context switches are done by
			 * kse_switchin, which is a syscall; kse_switchin
			 * will check the flag again, and the thread will be
			 * returned via an upcall, so next time the UTS won't
			 * run the thread.
			 */
			while (pthread != NULL && !DBG_CAN_RUN(pthread)) {
				pthread = TAILQ_NEXT(pthread, pqe);
			}
			/* This priority list had no runnable thread; try
			   the next one. */
			if (pthread == NULL)
				pqlnext = TAILQ_NEXT(pql, pl_link);
		}
	}

	PQ_CLEAR_ACTIVE(pq);
	return (pthread);
}
static void
pq_insert_prio_list(pq_queue_t *pq, int prio)

View File

@ -430,6 +430,7 @@ struct pthread_attr {
int prio;
int suspend;
#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
#define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */
int flags;
void *arg_attr;
void (*cleanup_attr) ();
@ -582,15 +583,19 @@ struct pthread_specific_elem {
int seqno;
};
struct pthread_key {
volatile int allocated;
volatile int count;
int seqno;
void (*destructor) (void *);
};
#define MAX_THR_LOCKLEVEL 5
/*
* Thread structure.
*/
struct pthread {
/*
* Thread mailbox is first so it can be aligned properly.
*/
/* Thread control block */
struct tcb *tcb;
/*
@ -816,12 +821,14 @@ struct pthread {
#define THR_YIELD_CHECK(thrd) \
do { \
if (((thrd)->critical_yield != 0) && \
!(THR_IN_CRITICAL(thrd))) \
_thr_sched_switch(thrd); \
else if (((thrd)->check_pending != 0) && \
!(THR_IN_CRITICAL(thrd))) \
_thr_sig_check_pending(thrd); \
if (!THR_IN_CRITICAL(thrd)) { \
if (__predict_false(_libkse_debug)) \
_thr_debug_check_yield(thrd); \
if ((thrd)->critical_yield != 0) \
_thr_sched_switch(thrd); \
if ((thrd)->check_pending != 0) \
_thr_sig_check_pending(thrd); \
} \
} while (0)
#define THR_LOCK_ACQUIRE(thrd, lck) \
@ -882,8 +889,6 @@ do { \
_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_REMOVE(thrd) \
_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_FIRST(thrd) \
_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
/*
* Macros to insert/remove threads to the all thread list and
@ -964,6 +969,8 @@ do { \
(((thrd)->state == PS_SUSPENDED) || \
(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)
#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
TMDF_DONOTRUNUSER) == 0)
extern int __isthreaded;
@ -980,6 +987,9 @@ _kse_isthreaded(void)
SCLASS void *_usrstack SCLASS_PRESET(NULL);
SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
/* For debugger */
SCLASS int _libkse_debug SCLASS_PRESET(0);
SCLASS int _thread_activated SCLASS_PRESET(0);
/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread) _thread_list
@ -989,7 +999,7 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
SCLASS int _thr_active_threads SCLASS_PRESET(1);
SCLASS int _thread_active_threads SCLASS_PRESET(1);
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t _thr_atfork_mutex;
@ -1079,6 +1089,7 @@ void _pq_remove(struct pq_queue *pq, struct pthread *);
void _pq_insert_head(struct pq_queue *pq, struct pthread *);
void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
struct pthread *_pq_first_debug(struct pq_queue *pq);
void *_pthread_getspecific(pthread_key_t);
int _pthread_key_create(pthread_key_t *, void (*) (void *));
int _pthread_key_delete(pthread_key_t);
@ -1150,6 +1161,7 @@ void _thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void _thr_finish_cancellation(void *arg);
int _thr_sigonstack(void *sp);
void _thr_debug_check_yield(struct pthread *);
/*
* Aliases for _pthread functions. Should be called instead of

View File

@ -38,15 +38,8 @@
#include <pthread.h>
#include "thr_private.h"
struct pthread_key {
volatile int allocated;
volatile int count;
int seqno;
void (*destructor) ();
};
/* Static variables: */
static struct pthread_key key_table[PTHREAD_KEYS_MAX];
struct pthread_key _thread_keytable[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@ -64,10 +57,10 @@ _pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
if (key_table[i].allocated == 0) {
key_table[i].allocated = 1;
key_table[i].destructor = destructor;
key_table[i].seqno++;
if (_thread_keytable[i].allocated == 0) {
_thread_keytable[i].allocated = 1;
_thread_keytable[i].destructor = destructor;
_thread_keytable[i].seqno++;
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
@ -91,8 +84,8 @@ _pthread_key_delete(pthread_key_t key)
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
if (key_table[key].allocated)
key_table[key].allocated = 0;
if (_thread_keytable[key].allocated)
_thread_keytable[key].allocated = 0;
else
ret = EINVAL;
@ -123,13 +116,13 @@ _thread_cleanupspecific(void)
(curthread->specific_data_count > 0); key++) {
destructor = NULL;
if (key_table[key].allocated &&
if (_thread_keytable[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
key_table[key].seqno) {
_thread_keytable[key].seqno) {
data = (void *)
curthread->specific[key].data;
destructor = key_table[key].destructor;
destructor = _thread_keytable[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
@ -185,7 +178,7 @@ _pthread_setspecific(pthread_key_t key, const void *value)
if ((pthread->specific) ||
(pthread->specific = pthread_key_allocate_data())) {
if ((unsigned int)key < PTHREAD_KEYS_MAX) {
if (key_table[key].allocated) {
if (_thread_keytable[key].allocated) {
if (pthread->specific[key].data == NULL) {
if (value != NULL)
pthread->specific_data_count++;
@ -193,7 +186,7 @@ _pthread_setspecific(pthread_key_t key, const void *value)
pthread->specific_data_count--;
pthread->specific[key].data = value;
pthread->specific[key].seqno =
key_table[key].seqno;
_thread_keytable[key].seqno;
ret = 0;
} else
ret = EINVAL;
@ -216,8 +209,8 @@ _pthread_getspecific(pthread_key_t key)
/* Check if there is specific data: */
if (pthread->specific != NULL && (unsigned int)key < PTHREAD_KEYS_MAX) {
/* Check if this key has been used before: */
if (key_table[key].allocated &&
(pthread->specific[key].seqno == key_table[key].seqno)) {
if (_thread_keytable[key].allocated &&
(pthread->specific[key].seqno == _thread_keytable[key].seqno)) {
/* Return the value: */
data = (void *) pthread->specific[key].data;
} else {