1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-11 14:10:34 +00:00

Don't really spin on a spinlock; silently convert it to the same

low-level lock used by the libpthread implementation.  In the
future, we'll eliminate spinlocks from libc but that will wait
until after 5.1-release.

Don't call an application signal handler if the handler is
the same as the library-installed handler.  This seems to
be possible after a fork and is the cause of konsole hangs.

Approved by:	re@ (jhb)
This commit is contained in:
Daniel Eischen 2003-05-29 17:10:45 +00:00
parent 0022867d24
commit 28362a5c80
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=115381
8 changed files with 142 additions and 32 deletions

View File

@ -515,6 +515,7 @@ init_private(void)
if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread specific keytable lock");
_thr_spinlock_init();
/* Clear pending signals and get the process signal mask. */
sigemptyset(&_thr_proc_sigpending);

View File

@ -1114,6 +1114,7 @@ void _thr_sched_frame(struct pthread_sigframe *);
void _thr_sched_switch(struct pthread *);
void _thr_sched_switch_unlocked(struct pthread *);
void _thr_set_timeout(const struct timespec *);
void _thr_seterrno(struct pthread *, int);
void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
void _thr_sig_check_pending(struct pthread *);
void _thr_sig_rundown(struct pthread *, ucontext_t *,
@ -1121,7 +1122,7 @@ void _thr_sig_rundown(struct pthread *, ucontext_t *,
void _thr_sig_send(struct pthread *pthread, int sig);
void _thr_sig_wrapper(void);
void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
void _thr_seterrno(struct pthread *, int);
void _thr_spinlock_init(void);
void _thr_enter_cancellation_point(struct pthread *);
void _thr_leave_cancellation_point(struct pthread *);
int _thr_setconcurrency(int new_level);

View File

@ -175,7 +175,7 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
void
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
void (*sigfunc)(int, siginfo_t *, void *);
__siginfohandler_t *sigfunc;
struct kse *curkse;
curkse = _get_curkse();
@ -184,7 +184,8 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
ucp->uc_sigmask = _thr_proc_sigmask;
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN)) {
((__sighandler_t *)sigfunc != SIG_IGN) &&
(sigfunc != (__siginfohandler_t *)_thr_sig_handler)) {
if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO)
!= 0) || (info == NULL))
(*(sigfunc))(sig, info, ucp);

View File

@ -40,6 +40,20 @@
#include "spinlock.h"
#include "thr_private.h"
#define MAX_SPINLOCKS 5
struct spinlock_extra {
struct lock lock;
kse_critical_t crit;
};
static void init_spinlock(spinlock_t *lck);
static struct lock spinlock_static_lock;
static struct spinlock_extra extra[MAX_SPINLOCKS];
static int spinlock_count = 0;
static int initialized = 0;
/*
* These are for compatibility only. Spinlocks of this type
* are deprecated.
@ -48,12 +62,13 @@
/*
 * Release a deprecated libc spinlock.
 *
 * NOTE(review): this span is a diff hunk rendered without its +/- markers,
 * so it interleaves the removed pre-change statements with their
 * replacements; as plain C the body is not coherent (crit is assigned from
 * two different sources).  Do not treat it as compilable source.
 */
void
_spinunlock(spinlock_t *lck)
{
struct spinlock_extra *extra;
kse_critical_t crit;
/* NOTE(review): the next four statements appear to be the removed (old)
 * implementation: fname held the critical-section token and the lock word
 * was cleared with an atomic release store. */
crit = (kse_critical_t)lck->fname;
atomic_store_rel_long(&lck->access_lock, 0);
if (crit != NULL)
_kse_critical_leave(crit);
/* NOTE(review): replacement code — fname now points at a struct
 * spinlock_extra whose embedded low-level lock is released before the
 * KSE critical section is left. */
extra = (struct spinlock_extra *)lck->fname;
crit = extra->crit;
KSE_LOCK_RELEASE(_get_curkse(), &extra->lock);
_kse_critical_leave(crit);
}
@ -66,21 +81,21 @@ _spinunlock(spinlock_t *lck)
/*
 * Acquire a deprecated libc spinlock.
 *
 * NOTE(review): diff hunk with +/- markers stripped — removed and
 * replacement statements are interleaved, so the body below is not
 * coherent C; the annotations mark which half appears to be which.
 */
void
_spinlock(spinlock_t *lck)
{
struct spinlock_extra *extra;
kse_critical_t crit;
THR_ASSERT(__isthreaded != 0, "Spinlock called when not threaded.");
THR_ASSERT(initialized != 0, "Spinlocks not initialized.");
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
/* NOTE(review): old implementation — spin on access_lock inside an
 * optional KSE critical section, stashing the token in fname. */
if (_kse_isthreaded())
crit = _kse_critical_enter();
else
crit = NULL;
while(_atomic_lock(&lck->access_lock)) {
while (lck->access_lock)
;
}
lck->fname = (char *)crit;
/* NOTE(review): replacement — always enter a critical section, lazily
 * bind this spinlock to a spinlock_extra slot, then take its real lock
 * and remember the critical-section token for _spinunlock(). */
crit = _kse_critical_enter();
if (lck->fname == NULL)
init_spinlock(lck);
extra = (struct spinlock_extra *)lck->fname;
KSE_LOCK_ACQUIRE(_get_curkse(), &extra->lock);
extra->crit = crit;
}
/*
@ -98,3 +113,40 @@ _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
_spinlock(lck);
}
/*
 * Lazily bind a legacy spinlock_t to one of the statically allocated
 * spinlock_extra slots, recording the slot pointer in the otherwise
 * unused lck->fname field.  The bind is performed under
 * spinlock_static_lock so that two threads hitting the same fresh lock
 * race safely; the THR_ASSERT fires once all MAX_SPINLOCKS slots are
 * in use.
 */
static void
init_spinlock(spinlock_t *lck)
{
struct kse *curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &spinlock_static_lock);
/* Re-check fname under the lock: another thread may have bound it. */
if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
lck->fname = (char *)&extra[spinlock_count];
spinlock_count++;
}
KSE_LOCK_RELEASE(curkse, &spinlock_static_lock);
THR_ASSERT(lck->fname != NULL, "Exceeded max spinlocks");
}
/*
 * Initialize (or reinitialize) the spinlock compatibility layer:
 * the static bind lock plus the fixed pool of spinlock_extra locks.
 * If called again after a previous initialization, the existing locks
 * are destroyed first and then recreated.
 */
void
_thr_spinlock_init(void)
{
int i;
/* Tear down locks left over from a prior initialization, if any. */
if (initialized != 0) {
_lock_destroy(&spinlock_static_lock);
for (i = 0; i < MAX_SPINLOCKS; i++) {
_lock_destroy(&extra[i].lock);
}
}
if (_lock_init(&spinlock_static_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Cannot initialize spinlock_static_lock");
for (i = 0; i < MAX_SPINLOCKS; i++) {
if (_lock_init(&extra[i].lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Cannot initialize spinlock extra");
}
initialized = 1;
}

View File

@ -515,6 +515,7 @@ init_private(void)
if (_lock_init(&_keytable_lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread specific keytable lock");
_thr_spinlock_init();
/* Clear pending signals and get the process signal mask. */
sigemptyset(&_thr_proc_sigpending);

View File

@ -1114,6 +1114,7 @@ void _thr_sched_frame(struct pthread_sigframe *);
void _thr_sched_switch(struct pthread *);
void _thr_sched_switch_unlocked(struct pthread *);
void _thr_set_timeout(const struct timespec *);
void _thr_seterrno(struct pthread *, int);
void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
void _thr_sig_check_pending(struct pthread *);
void _thr_sig_rundown(struct pthread *, ucontext_t *,
@ -1121,7 +1122,7 @@ void _thr_sig_rundown(struct pthread *, ucontext_t *,
void _thr_sig_send(struct pthread *pthread, int sig);
void _thr_sig_wrapper(void);
void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
void _thr_seterrno(struct pthread *, int);
void _thr_spinlock_init(void);
void _thr_enter_cancellation_point(struct pthread *);
void _thr_leave_cancellation_point(struct pthread *);
int _thr_setconcurrency(int new_level);

View File

@ -175,7 +175,7 @@ _thr_sig_dispatch(struct kse *curkse, int sig, siginfo_t *info)
void
_thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
void (*sigfunc)(int, siginfo_t *, void *);
__siginfohandler_t *sigfunc;
struct kse *curkse;
curkse = _get_curkse();
@ -184,7 +184,8 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
sigfunc = _thread_sigact[sig - 1].sa_sigaction;
ucp->uc_sigmask = _thr_proc_sigmask;
if (((__sighandler_t *)sigfunc != SIG_DFL) &&
((__sighandler_t *)sigfunc != SIG_IGN)) {
((__sighandler_t *)sigfunc != SIG_IGN) &&
(sigfunc != (__siginfohandler_t *)_thr_sig_handler)) {
if (((_thread_sigact[sig - 1].sa_flags & SA_SIGINFO)
!= 0) || (info == NULL))
(*(sigfunc))(sig, info, ucp);

View File

@ -40,6 +40,20 @@
#include "spinlock.h"
#include "thr_private.h"
#define MAX_SPINLOCKS 5
struct spinlock_extra {
struct lock lock;
kse_critical_t crit;
};
static void init_spinlock(spinlock_t *lck);
static struct lock spinlock_static_lock;
static struct spinlock_extra extra[MAX_SPINLOCKS];
static int spinlock_count = 0;
static int initialized = 0;
/*
* These are for compatibility only. Spinlocks of this type
* are deprecated.
@ -48,12 +62,13 @@
/*
 * Release a deprecated libc spinlock.
 *
 * NOTE(review): this span is a diff hunk rendered without its +/- markers,
 * so it interleaves the removed pre-change statements with their
 * replacements; as plain C the body is not coherent (crit is assigned from
 * two different sources).  Do not treat it as compilable source.
 */
void
_spinunlock(spinlock_t *lck)
{
struct spinlock_extra *extra;
kse_critical_t crit;
/* NOTE(review): the next four statements appear to be the removed (old)
 * implementation: fname held the critical-section token and the lock word
 * was cleared with an atomic release store. */
crit = (kse_critical_t)lck->fname;
atomic_store_rel_long(&lck->access_lock, 0);
if (crit != NULL)
_kse_critical_leave(crit);
/* NOTE(review): replacement code — fname now points at a struct
 * spinlock_extra whose embedded low-level lock is released before the
 * KSE critical section is left. */
extra = (struct spinlock_extra *)lck->fname;
crit = extra->crit;
KSE_LOCK_RELEASE(_get_curkse(), &extra->lock);
_kse_critical_leave(crit);
}
@ -66,21 +81,21 @@ _spinunlock(spinlock_t *lck)
/*
 * Acquire a deprecated libc spinlock.
 *
 * NOTE(review): diff hunk with +/- markers stripped — removed and
 * replacement statements are interleaved, so the body below is not
 * coherent C; the annotations mark which half appears to be which.
 */
void
_spinlock(spinlock_t *lck)
{
struct spinlock_extra *extra;
kse_critical_t crit;
THR_ASSERT(__isthreaded != 0, "Spinlock called when not threaded.");
THR_ASSERT(initialized != 0, "Spinlocks not initialized.");
/*
* Try to grab the lock and loop if another thread grabs
* it before we do.
*/
/* NOTE(review): old implementation — spin on access_lock inside an
 * optional KSE critical section, stashing the token in fname. */
if (_kse_isthreaded())
crit = _kse_critical_enter();
else
crit = NULL;
while(_atomic_lock(&lck->access_lock)) {
while (lck->access_lock)
;
}
lck->fname = (char *)crit;
/* NOTE(review): replacement — always enter a critical section, lazily
 * bind this spinlock to a spinlock_extra slot, then take its real lock
 * and remember the critical-section token for _spinunlock(). */
crit = _kse_critical_enter();
if (lck->fname == NULL)
init_spinlock(lck);
extra = (struct spinlock_extra *)lck->fname;
KSE_LOCK_ACQUIRE(_get_curkse(), &extra->lock);
extra->crit = crit;
}
/*
@ -98,3 +113,40 @@ _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
_spinlock(lck);
}
/*
 * Lazily bind a legacy spinlock_t to one of the statically allocated
 * spinlock_extra slots, recording the slot pointer in the otherwise
 * unused lck->fname field.  The bind is performed under
 * spinlock_static_lock so that two threads hitting the same fresh lock
 * race safely; the THR_ASSERT fires once all MAX_SPINLOCKS slots are
 * in use.
 */
static void
init_spinlock(spinlock_t *lck)
{
struct kse *curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &spinlock_static_lock);
/* Re-check fname under the lock: another thread may have bound it. */
if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
lck->fname = (char *)&extra[spinlock_count];
spinlock_count++;
}
KSE_LOCK_RELEASE(curkse, &spinlock_static_lock);
THR_ASSERT(lck->fname != NULL, "Exceeded max spinlocks");
}
/*
 * Initialize (or reinitialize) the spinlock compatibility layer:
 * the static bind lock plus the fixed pool of spinlock_extra locks.
 * If called again after a previous initialization, the existing locks
 * are destroyed first and then recreated.
 */
void
_thr_spinlock_init(void)
{
int i;
/* Tear down locks left over from a prior initialization, if any. */
if (initialized != 0) {
_lock_destroy(&spinlock_static_lock);
for (i = 0; i < MAX_SPINLOCKS; i++) {
_lock_destroy(&extra[i].lock);
}
}
if (_lock_init(&spinlock_static_lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Cannot initialize spinlock_static_lock");
for (i = 0; i < MAX_SPINLOCKS; i++) {
if (_lock_init(&extra[i].lock, LCK_ADAPTIVE,
_kse_lock_wait, _kse_lock_wakeup) != 0)
PANIC("Cannot initialize spinlock extra");
}
initialized = 1;
}