
MFV r284763: 5981 Deadlock in dmu_objset_find_dp

illumos/illumos-gate@1d3f896f54

https://www.illumos.org/issues/5981
  When dmu_objset_find_dp gets called with a read lock held, it fans out
  the work to the task queue. Each task in turn acquires its own read
  lock before calling the callback. If during this process anyone tries
  to acquire a write lock, it will stall all read lock requests. Thus
  the tasks will never finish, the read lock of the caller will never
  be released, and the write lock will never be acquired: deadlock.

Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Approved by: Robert Mustacchi <rm@joyent.com>
Author: Arne Jansen <jansen@webgods.de>
Commit: d0687a01d7 by Alexander Motin, 2015-08-12 19:10:29 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=286689

5 changed files with 40 additions and 4 deletions
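To make the reported failure mode concrete before the diff, here is a minimal user-space sketch of the pattern in C with POSIX threads. It is not ZFS code: the toy_* functions, the global lock state, and the prio flag are all invented for illustration and only mimic the writer-preferring behaviour of dp_config_rwlock. A parent thread takes the read lock and then waits for a worker that also needs it; once a writer has queued up, a plain reader would stall behind that writer and the whole chain would deadlock. Letting the worker overtake the pending writer, as the rrw_enter_read_prio() interface added below does, breaks the cycle.

/*
 * Toy model of the deadlock (illustration only, not ZFS code; every name
 * below is made up).  The lock prefers writers: once a writer is queued,
 * new readers wait.  A parent that already holds the read lock and then
 * waits for a worker that also needs the read lock can therefore never
 * release it, unless the worker may pass the queued writer, which is
 * what the prio flag (mirroring rrw_enter_read_prio()) allows.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int readers;		/* active readers */
static int writer_wanted;	/* a writer is waiting for the lock */
static int writer_active;	/* a writer holds the lock */

static void
toy_enter_read(int prio)
{
	pthread_mutex_lock(&lock);
	/* Without prio, a queued writer stalls every new reader. */
	while (writer_active || (writer_wanted && !prio))
		pthread_cond_wait(&cv, &lock);
	readers++;
	pthread_mutex_unlock(&lock);
}

static void
toy_exit_read(void)
{
	pthread_mutex_lock(&lock);
	if (--readers == 0)
		pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
}

static void *
writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	writer_wanted = 1;		/* from now on plain readers stall */
	while (readers > 0 || writer_active)
		pthread_cond_wait(&cv, &lock);
	writer_wanted = 0;
	writer_active = 1;
	/* ... the writer's critical section would run here ... */
	writer_active = 0;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

static void *
worker(void *arg)
{
	(void)arg;
	/*
	 * The taskq callback: it needs its own read lock.  Passing 0 here
	 * instead reproduces the deadlock, because the parent in main()
	 * only drops its read lock after this worker has finished.
	 */
	toy_enter_read(1);
	toy_exit_read();
	return (NULL);
}

int
main(void)
{
	pthread_t w, t;

	toy_enter_read(0);		/* parent takes the read lock */
	pthread_create(&w, NULL, writer, NULL);
	sleep(1);			/* let the writer queue up */
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);		/* parent waits for its worker */
	toy_exit_read();		/* only now may the writer proceed */
	pthread_join(w, NULL);
	printf("no deadlock: the worker overtook the pending writer\n");
	return (0);
}

Compiled with cc -pthread, the sketch finishes after about a second; changing toy_enter_read(1) to toy_enter_read(0) in worker() reproduces the hang described in the issue.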

@@ -1746,7 +1746,15 @@ dmu_objset_find_dp_cb(void *arg)
 	dmu_objset_find_ctx_t *dcp = arg;
 	dsl_pool_t *dp = dcp->dc_dp;
 
-	dsl_pool_config_enter(dp, FTAG);
+	/*
+	 * We need to get a pool_config_lock here, as there are several
+	 * assert(pool_config_held) down the stack. Getting a lock via
+	 * dsl_pool_config_enter is risky, as it might be stalled by a
+	 * pending writer. This would deadlock, as the write lock can
+	 * only be granted when our parent thread gives up the lock.
+	 * The _prio interface gives us priority over a pending writer.
+	 */
+	dsl_pool_config_enter_prio(dp, FTAG);
 	dmu_objset_find_dp_impl(dcp);

@@ -1138,6 +1138,13 @@ dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
 	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
 }
 
 void
+dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
+{
+	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
+	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
+}
+
+void
 dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
 {

@@ -159,8 +159,8 @@ rrw_destroy(rrwlock_t *rrl)
 	refcount_destroy(&rrl->rr_linked_rcount);
 }
 
-void
-rrw_enter_read(rrwlock_t *rrl, void *tag)
+static void
+rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
 {
 	mutex_enter(&rrl->rr_lock);
 #if !defined(DEBUG) && defined(_KERNEL)
@@ -176,7 +176,7 @@ rrw_enter_read(rrwlock_t *rrl, void *tag)
 	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
 
 	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
-	    refcount_is_zero(&rrl->rr_anon_rcount) &&
+	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
 	    rrn_find(rrl) == NULL))
 		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
@@ -191,6 +191,25 @@ rrw_enter_read(rrwlock_t *rrl, void *tag)
 	mutex_exit(&rrl->rr_lock);
 }
 
+void
+rrw_enter_read(rrwlock_t *rrl, void *tag)
+{
+	rrw_enter_read_impl(rrl, B_FALSE, tag);
+}
+
+/*
+ * Take a read lock even if there are pending write lock requests. If we want
+ * to take a lock reentrantly, but from different threads (that have a
+ * relationship to each other), the normal detection mechanism to overrule
+ * the pending writer does not work, so we have to give an explicit hint here.
+ */
+void
+rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
+{
+	rrw_enter_read_impl(rrl, B_TRUE, tag);
+}
+
 void
 rrw_enter_write(rrwlock_t *rrl)
 {

@@ -152,6 +152,7 @@ void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx);
 void dsl_pool_mos_diduse_space(dsl_pool_t *dp,
     int64_t used, int64_t comp, int64_t uncomp);
 void dsl_pool_config_enter(dsl_pool_t *dp, void *tag);
+void dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag);
 void dsl_pool_config_exit(dsl_pool_t *dp, void *tag);
 boolean_t dsl_pool_config_held(dsl_pool_t *dp);
 boolean_t dsl_pool_config_held_writer(dsl_pool_t *dp);

@@ -69,6 +69,7 @@ void rrw_init(rrwlock_t *rrl, boolean_t track_all);
 void rrw_destroy(rrwlock_t *rrl);
 void rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag);
 void rrw_enter_read(rrwlock_t *rrl, void *tag);
+void rrw_enter_read_prio(rrwlock_t *rrl, void *tag);
 void rrw_enter_write(rrwlock_t *rrl);
 void rrw_exit(rrwlock_t *rrl, void *tag);
 boolean_t rrw_held(rrwlock_t *rrl, krw_t rw);