From 24f33bca1c2f4207c48830f893ee5a07e3c4402e Mon Sep 17 00:00:00 2001 From: Daniel Eischen Date: Thu, 8 Jan 2004 15:37:09 +0000 Subject: [PATCH] Add a simple work-around for deadlocking on recursive read locks on a rwlock while there are writers waiting. We normally favor writers but when a reader already has at least one other read lock, we favor the reader. We don't track all the rwlocks owned by a thread, nor all the threads that own a rwlock -- we just keep a count of all the read locks owned by a thread. PR: 24641 --- lib/libkse/thread/thr_create.c | 1 + lib/libkse/thread/thr_private.h | 5 +- lib/libkse/thread/thr_rwlock.c | 128 ++++++++++++++++++---------- lib/libpthread/thread/thr_create.c | 1 + lib/libpthread/thread/thr_private.h | 5 +- lib/libpthread/thread/thr_rwlock.c | 128 ++++++++++++++++++---------- 6 files changed, 172 insertions(+), 96 deletions(-) diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c index b82f1809a27c..38c2fc4ae214 100644 --- a/lib/libkse/thread/thr_create.c +++ b/lib/libkse/thread/thr_create.c @@ -252,6 +252,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, sigemptyset(&new_thread->sigpend); new_thread->check_pending = 0; new_thread->locklevel = 0; + new_thread->rdlock_count = 0; new_thread->sigstk.ss_sp = 0; new_thread->sigstk.ss_size = 0; new_thread->sigstk.ss_flags = SS_DISABLE; diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index 73c9e284e398..7948df8e428c 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -521,9 +521,9 @@ struct pthread_rwlockattr { struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ - int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; + int state; /* 0 = idle >0 = # of readers -1 = writer */ int blocked_writers; }; @@ -789,6 +789,9 @@ struct pthread { /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; + /* Number rwlocks rdlocks held. */ + int rdlock_count; + /* * Queue of currently owned mutexes. 
*/ diff --git a/lib/libkse/thread/thr_rwlock.c b/lib/libkse/thread/thr_rwlock.c index e1726e56e49d..ca8a0815d2e0 100644 --- a/lib/libkse/thread/thr_rwlock.c +++ b/lib/libkse/thread/thr_rwlock.c @@ -92,15 +92,14 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock) ret = 0; } - return (ret); } int _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; /* allocate rwlock object */ prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock)); @@ -128,7 +127,7 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr free(prwlock); } else { /* success */ - prwlock->state = 0; + prwlock->state = 0; prwlock->blocked_writers = 0; *rwlock = prwlock; @@ -142,8 +141,9 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr static int rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + struct pthread *curthread; + int ret; if (rwlock == NULL) return (EINVAL); @@ -162,26 +162,47 @@ rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0) return (ret); - /* give writers priority over readers */ - while (prwlock->blocked_writers || prwlock->state < 0) { - if (abstime) - ret = _pthread_cond_timedwait(&prwlock->read_signal, - &prwlock->lock, abstime); - else - ret = _thr_cond_wait(&prwlock->read_signal, - &prwlock->lock); - if (ret != 0) { - /* can't do a whole lot if this fails */ - _thr_mutex_unlock(&prwlock->lock); - return (ret); + /* check lock count */ + if (prwlock->state == MAX_READ_LOCKS) { + _thr_mutex_unlock(&prwlock->lock); + return (EAGAIN); + } + + curthread = _get_curthread(); + if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) { + /* + * To avoid having to track all the rdlocks held by + * a thread or all of the threads that hold a rdlock, + * we keep a simple count of all the rdlocks held by + * a thread. If a thread holds any rdlocks it is + * possible that it is attempting to take a recursive + * rdlock. If there are blocked writers and precedence + * is given to them, then that would result in the thread + * deadlocking. So allowing a thread to take the rdlock + * when it already has one or more rdlocks avoids the + * deadlock. I hope the reader can follow that logic ;-) + */ + ; /* nothing needed */ + } else { + /* give writers priority over readers */ + while (prwlock->blocked_writers || prwlock->state < 0) { + if (abstime) + ret = _pthread_cond_timedwait + (&prwlock->read_signal, + &prwlock->lock, abstime); + else + ret = _thr_cond_wait(&prwlock->read_signal, + &prwlock->lock); + if (ret != 0) { + /* can't do a whole lot if this fails */ + _thr_mutex_unlock(&prwlock->lock); + return (ret); + } } } - /* check lock count */ - if (prwlock->state == MAX_READ_LOCKS) - ret = EAGAIN; - else - ++prwlock->state; /* indicate we are locked for reading */ + curthread->rdlock_count++; + prwlock->state++; /* indicate we are locked for reading */ /* * Something is really wrong if this call fails. 
Returning @@ -197,7 +218,7 @@ rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) int _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { - return rwlock_rdlock_common (rwlock, NULL); + return (rwlock_rdlock_common(rwlock, NULL)); } __strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock); @@ -206,14 +227,15 @@ int _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - return rwlock_rdlock_common(rwlock, abstime); + return (rwlock_rdlock_common(rwlock, abstime)); } int _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + struct pthread *curthread; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -232,13 +254,21 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) return (ret); + curthread = _get_curthread(); + if (prwlock->state == MAX_READ_LOCKS) + ret = EAGAIN; + else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) { + /* see comment for pthread_rwlock_rdlock() */ + curthread->rdlock_count++; + prwlock->state++; + } /* give writers priority over readers */ - if (prwlock->blocked_writers || prwlock->state < 0) + else if (prwlock->blocked_writers || prwlock->state < 0) ret = EBUSY; - else if (prwlock->state == MAX_READ_LOCKS) - ret = EAGAIN; /* too many read locks acquired */ - else - ++prwlock->state; /* indicate we are locked for reading */ + else { + curthread->rdlock_count++; + prwlock->state++; /* indicate we are locked for reading */ + } /* see the comment on this in pthread_rwlock_rdlock */ _pthread_mutex_unlock(&prwlock->lock); @@ -249,8 +279,8 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) int _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -284,8 +314,9 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) int _pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + struct pthread *curthread; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -299,8 +330,11 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock) if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0) return (ret); + curthread = _get_curthread(); if (prwlock->state > 0) { - if (--prwlock->state == 0 && prwlock->blocked_writers) + curthread->rdlock_count--; + prwlock->state--; + if (prwlock->state == 0 && prwlock->blocked_writers) ret = _thr_cond_signal(&prwlock->write_signal); } else if (prwlock->state < 0) { prwlock->state = 0; @@ -323,8 +357,8 @@ __strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock); static int rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -344,21 +378,21 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) return (ret); while (prwlock->state != 0) { - ++prwlock->blocked_writers; + prwlock->blocked_writers++; if (abstime != NULL) ret = _pthread_cond_timedwait(&prwlock->write_signal, - &prwlock->lock, abstime); + &prwlock->lock, abstime); else ret = _thr_cond_wait(&prwlock->write_signal, - &prwlock->lock); + &prwlock->lock); if (ret != 0) { - --prwlock->blocked_writers; + prwlock->blocked_writers--; _thr_mutex_unlock(&prwlock->lock); return (ret); } - --prwlock->blocked_writers; + prwlock->blocked_writers--; } /* 
indicate we are locked for writing */ @@ -373,13 +407,13 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) int _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) { - return rwlock_wrlock_common (rwlock, NULL); + return (rwlock_wrlock_common (rwlock, NULL)); } __strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock); int _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, - const struct timespec *abstime) + const struct timespec *abstime) { - return rwlock_wrlock_common (rwlock, abstime); + return (rwlock_wrlock_common (rwlock, abstime)); } diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c index b82f1809a27c..38c2fc4ae214 100644 --- a/lib/libpthread/thread/thr_create.c +++ b/lib/libpthread/thread/thr_create.c @@ -252,6 +252,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, sigemptyset(&new_thread->sigpend); new_thread->check_pending = 0; new_thread->locklevel = 0; + new_thread->rdlock_count = 0; new_thread->sigstk.ss_sp = 0; new_thread->sigstk.ss_size = 0; new_thread->sigstk.ss_flags = SS_DISABLE; diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h index 73c9e284e398..7948df8e428c 100644 --- a/lib/libpthread/thread/thr_private.h +++ b/lib/libpthread/thread/thr_private.h @@ -521,9 +521,9 @@ struct pthread_rwlockattr { struct pthread_rwlock { pthread_mutex_t lock; /* monitor lock */ - int state; /* 0 = idle >0 = # of readers -1 = writer */ pthread_cond_t read_signal; pthread_cond_t write_signal; + int state; /* 0 = idle >0 = # of readers -1 = writer */ int blocked_writers; }; @@ -789,6 +789,9 @@ struct pthread { /* Number of priority ceiling or protection mutexes owned. */ int priority_mutex_count; + /* Number rwlocks rdlocks held. */ + int rdlock_count; + /* * Queue of currently owned mutexes. 
*/ diff --git a/lib/libpthread/thread/thr_rwlock.c b/lib/libpthread/thread/thr_rwlock.c index e1726e56e49d..ca8a0815d2e0 100644 --- a/lib/libpthread/thread/thr_rwlock.c +++ b/lib/libpthread/thread/thr_rwlock.c @@ -92,15 +92,14 @@ _pthread_rwlock_destroy (pthread_rwlock_t *rwlock) ret = 0; } - return (ret); } int _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; /* allocate rwlock object */ prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock)); @@ -128,7 +127,7 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr free(prwlock); } else { /* success */ - prwlock->state = 0; + prwlock->state = 0; prwlock->blocked_writers = 0; *rwlock = prwlock; @@ -142,8 +141,9 @@ _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr static int rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + struct pthread *curthread; + int ret; if (rwlock == NULL) return (EINVAL); @@ -162,26 +162,47 @@ rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0) return (ret); - /* give writers priority over readers */ - while (prwlock->blocked_writers || prwlock->state < 0) { - if (abstime) - ret = _pthread_cond_timedwait(&prwlock->read_signal, - &prwlock->lock, abstime); - else - ret = _thr_cond_wait(&prwlock->read_signal, - &prwlock->lock); - if (ret != 0) { - /* can't do a whole lot if this fails */ - _thr_mutex_unlock(&prwlock->lock); - return (ret); + /* check lock count */ + if (prwlock->state == MAX_READ_LOCKS) { + _thr_mutex_unlock(&prwlock->lock); + return (EAGAIN); + } + + curthread = _get_curthread(); + if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) { + /* + * To avoid having to track all the rdlocks held by + * a thread or all of the threads that hold a rdlock, + * we keep a simple count of all the rdlocks held by + * a thread. If a thread holds any rdlocks it is + * possible that it is attempting to take a recursive + * rdlock. If there are blocked writers and precedence + * is given to them, then that would result in the thread + * deadlocking. So allowing a thread to take the rdlock + * when it already has one or more rdlocks avoids the + * deadlock. I hope the reader can follow that logic ;-) + */ + ; /* nothing needed */ + } else { + /* give writers priority over readers */ + while (prwlock->blocked_writers || prwlock->state < 0) { + if (abstime) + ret = _pthread_cond_timedwait + (&prwlock->read_signal, + &prwlock->lock, abstime); + else + ret = _thr_cond_wait(&prwlock->read_signal, + &prwlock->lock); + if (ret != 0) { + /* can't do a whole lot if this fails */ + _thr_mutex_unlock(&prwlock->lock); + return (ret); + } } } - /* check lock count */ - if (prwlock->state == MAX_READ_LOCKS) - ret = EAGAIN; - else - ++prwlock->state; /* indicate we are locked for reading */ + curthread->rdlock_count++; + prwlock->state++; /* indicate we are locked for reading */ /* * Something is really wrong if this call fails. 
Returning @@ -197,7 +218,7 @@ rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) int _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { - return rwlock_rdlock_common (rwlock, NULL); + return (rwlock_rdlock_common(rwlock, NULL)); } __strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock); @@ -206,14 +227,15 @@ int _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - return rwlock_rdlock_common(rwlock, abstime); + return (rwlock_rdlock_common(rwlock, abstime)); } int _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + struct pthread *curthread; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -232,13 +254,21 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0) return (ret); + curthread = _get_curthread(); + if (prwlock->state == MAX_READ_LOCKS) + ret = EAGAIN; + else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) { + /* see comment for pthread_rwlock_rdlock() */ + curthread->rdlock_count++; + prwlock->state++; + } /* give writers priority over readers */ - if (prwlock->blocked_writers || prwlock->state < 0) + else if (prwlock->blocked_writers || prwlock->state < 0) ret = EBUSY; - else if (prwlock->state == MAX_READ_LOCKS) - ret = EAGAIN; /* too many read locks acquired */ - else - ++prwlock->state; /* indicate we are locked for reading */ + else { + curthread->rdlock_count++; + prwlock->state++; /* indicate we are locked for reading */ + } /* see the comment on this in pthread_rwlock_rdlock */ _pthread_mutex_unlock(&prwlock->lock); @@ -249,8 +279,8 @@ _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) int _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -284,8 +314,9 @@ _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) int _pthread_rwlock_unlock (pthread_rwlock_t *rwlock) { - pthread_rwlock_t prwlock; - int ret; + struct pthread *curthread; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -299,8 +330,11 @@ _pthread_rwlock_unlock (pthread_rwlock_t *rwlock) if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0) return (ret); + curthread = _get_curthread(); if (prwlock->state > 0) { - if (--prwlock->state == 0 && prwlock->blocked_writers) + curthread->rdlock_count--; + prwlock->state--; + if (prwlock->state == 0 && prwlock->blocked_writers) ret = _thr_cond_signal(&prwlock->write_signal); } else if (prwlock->state < 0) { prwlock->state = 0; @@ -323,8 +357,8 @@ __strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock); static int rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) { - pthread_rwlock_t prwlock; - int ret; + pthread_rwlock_t prwlock; + int ret; if (rwlock == NULL) return (EINVAL); @@ -344,21 +378,21 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) return (ret); while (prwlock->state != 0) { - ++prwlock->blocked_writers; + prwlock->blocked_writers++; if (abstime != NULL) ret = _pthread_cond_timedwait(&prwlock->write_signal, - &prwlock->lock, abstime); + &prwlock->lock, abstime); else ret = _thr_cond_wait(&prwlock->write_signal, - &prwlock->lock); + &prwlock->lock); if (ret != 0) { - --prwlock->blocked_writers; + prwlock->blocked_writers--; _thr_mutex_unlock(&prwlock->lock); return (ret); } - --prwlock->blocked_writers; + prwlock->blocked_writers--; } /* 
indicate we are locked for writing */ @@ -373,13 +407,13 @@ rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime) int _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) { - return rwlock_wrlock_common (rwlock, NULL); + return (rwlock_wrlock_common (rwlock, NULL)); } __strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock); int _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, - const struct timespec *abstime) + const struct timespec *abstime) { - return rwlock_wrlock_common (rwlock, abstime); + return (rwlock_wrlock_common (rwlock, abstime)); }
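
For context, the situation the new per-thread rdlock_count check avoids can be shown with a small standalone program. This is a minimal sketch and not part of the commit: the writer_thread name, the sleep(1) used to let the writer block, and the printf output are illustrative assumptions, and error checking is omitted for brevity.

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t rw;

static void *
writer_thread(void *arg)
{
	/* Blocks until the reader releases both of its read locks. */
	pthread_rwlock_wrlock(&rw);
	printf("writer: acquired write lock\n");
	pthread_rwlock_unlock(&rw);
	return (NULL);
}

int
main(void)
{
	pthread_t w;

	pthread_rwlock_init(&rw, NULL);

	/* First read lock; this thread's rdlock_count becomes 1. */
	pthread_rwlock_rdlock(&rw);

	/* Start a writer; it blocks and bumps blocked_writers. */
	pthread_create(&w, NULL, writer_thread, NULL);
	sleep(1);	/* crude way to let the writer block; illustrative only */

	/*
	 * Recursive read lock.  Under strict writer preference this call
	 * would wait behind the blocked writer, which in turn waits for
	 * this thread to unlock: a deadlock.  With the rdlock_count check
	 * added by this patch, the lock is granted immediately because
	 * the thread already holds at least one read lock.
	 */
	pthread_rwlock_rdlock(&rw);
	printf("main: recursive read lock granted\n");

	pthread_rwlock_unlock(&rw);
	pthread_rwlock_unlock(&rw);

	pthread_join(w, NULL);
	pthread_rwlock_destroy(&rw);
	return (0);
}
```

The trade-off is the one stated in the commit message: because only a single per-thread count is kept rather than a list of owned rwlocks, a thread holding a read lock on any rwlock bypasses writer preference on every rwlock it read-locks while the lock is already held for reading. That can delay waiting writers a little, but it cannot reintroduce the recursive-rdlock deadlock.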