Fix some bugs related to adaptive spinning:
In the lockmgr support:
- GIANT_RESTORE() is called only when the sleep finishes, so the current
  code can end up with a Giant unlock problem.  Fix it by calling
  GIANT_RESTORE() appropriately when needed.  Note that this is not
  exactly ideal, because Giant is dropped and restored on every iteration
  of the adaptive spin, but the overhead should not be a factor.
- In the case where the lock is held in exclusive mode, once the adaptive
  spin completes we should retry the lock acquisition instead of falling
  through.  Fix that.
- Fix a style nit.

In the sx support:
- Call GIANT_SAVE() before looping rather than inside the loop.  This
  saves some overhead, because the current code calls GIANT_SAVE()
  several times.

Tested by:	Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
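The lockmgr fix is easiest to see as control flow: after an adaptive spin the code must reacquire Giant and then retry the acquisition, rather than falling through with Giant still dropped. Below is a minimal userland sketch of that corrected flow, assuming a pthread mutex as a stand-in for Giant and a single atomic word as the lock; giant_save(), giant_restore(), and adaptive_acquire() are illustrative names, not FreeBSD APIs.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER; /* Giant stand-in */
static _Atomic uintptr_t lk;		/* 0 = free, otherwise the owner's id */

/* Drop the outer lock before spinning; report that it was held. */
static int
giant_save(void)
{

	pthread_mutex_unlock(&giant);
	return (1);
}

/* Reacquire the outer lock if giant_save() dropped it. */
static void
giant_restore(int saved)
{

	if (saved)
		pthread_mutex_lock(&giant);
}

/* Acquire 'lk' adaptively; called with 'giant' held. */
static void
adaptive_acquire(uintptr_t self)
{
	uintptr_t expected;
	int saved;

	for (;;) {
		expected = 0;
		if (atomic_compare_exchange_weak(&lk, &expected, self))
			return;			/* acquired */
		saved = giant_save();		/* never spin while holding Giant */
		while (atomic_load(&lk) != 0)
			sched_yield();		/* stand-in for cpu_spinwait() */
		giant_restore(saved);		/* the fix: restore Giant... */
		/* ...and retry the acquire instead of falling through to sleep */
	}
}

Exiting the spin loop without the giant_restore() call above is the "Giant unlock problem" the message describes: some paths would then run, or sleep, with Giant still dropped.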
parent c8e648e167
commit 8d3635c4db
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=196772
sys/kern/kern_lock.c
@@ -467,7 +467,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 		/*
 		 * If the owner is running on another CPU, spin until
 		 * the owner stops running or the state of the lock
-		 * changes.
+		 * changes.  We need a double-state handle here
+		 * because for a failed acquisition the lock can be
+		 * either held in exclusive mode or shared mode
+		 * (for the writer starvation avoidance technique).
 		 */
 		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
 		    LK_HOLDER(x) != LK_KERNPROC) {
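The "double-state handle" in the new comment refers to decoding both failure states a lockmgr lock word can be in after a failed acquisition: held exclusively by a single owner, or held in shared mode (retained for the writer starvation avoidance technique). A simplified sketch of that decoding, with illustrative bit encodings rather than the real LK_* layout from sys/sys/lockmgr.h:

#include <stdint.h>

/* Illustrative encodings; the real LK_* definitions live in sys/sys/lockmgr.h. */
#define	LK_SHARE	((uintptr_t)0x01)		/* set when held shared */
#define	LK_HOLDER(x)	((x) & ~(uintptr_t)0x0f)	/* exclusive owner bits */
#define	LK_SHARERS(x)	((x) >> 4)			/* shared holder count */

static int
held_exclusive(uintptr_t x)
{

	return ((x & LK_SHARE) == 0 && LK_HOLDER(x) != 0);
}

static int
held_shared(uintptr_t x)
{

	return ((x & LK_SHARE) != 0 && LK_SHARERS(x) > 0);
}

Only the exclusive case names a specific owner to watch with TD_IS_RUNNING(); the shared case bounds the spin with alk_retries/alk_loops instead, which is why the two branches in the hunks below differ.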
@@ -491,8 +494,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			GIANT_RESTORE();
+			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
-		    (x & LK_SHARE) !=0 && LK_SHARERS(x) &&
+		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
 			if (flags & LK_INTERLOCK) {
 				class->lc_unlock(ilk);
@@ -511,6 +516,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
 		}
@@ -704,6 +710,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			while (LK_HOLDER(lk->lk_lock) ==
 			    (uintptr_t)owner && TD_IS_RUNNING(owner))
 				cpu_spinwait();
+			GIANT_RESTORE();
+			continue;
 		} else if (LK_CAN_ADAPT(lk, flags) &&
 		    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 		    spintries < alk_retries) {
@@ -727,6 +735,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 					break;
 				cpu_spinwait();
 			}
+			GIANT_RESTORE();
 			if (i != alk_loops)
 				continue;
 		}
sys/kern/kern_sx.c
@@ -531,13 +531,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 				continue;
 			}
 		} else if (SX_SHARERS(x) && spintries < asx_retries) {
+			GIANT_SAVE();
 			spintries++;
 			for (i = 0; i < asx_loops; i++) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
 					    "%s: shared spinning on %p with %u and %u",
 					    __func__, sx, spintries, i);
-				GIANT_SAVE();
 				x = sx->sx_lock;
 				if ((x & SX_LOCK_SHARED) == 0 ||
 				    SX_SHARERS(x) == 0)
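The sx change is a loop-invariant hoist: GIANT_SAVE() only needs to run once before the spin starts, not on every iteration. A schematic userland sketch of the hoisted shape, with giant_save() as a hypothetical analog of the kernel macro:

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER; /* Giant stand-in */
static volatile int sharers;	/* stand-in for SX_SHARERS(sx->sx_lock) */

/* Drop the outer lock; called with 'giant' held. */
static void
giant_save(void)
{

	pthread_mutex_unlock(&giant);
}

/* Spin while readers hold the lock, saving Giant once up front. */
static void
shared_spin(int loops)
{
	int i;

	giant_save();			/* hoisted: once, not per iteration */
	for (i = 0; i < loops; i++) {
		if (sharers == 0)
			break;
		sched_yield();		/* stand-in for cpu_spinwait() */
	}
}

GIANT_RESTORE() still runs after the spin, as the lockmgr hunks above show, so the save/restore pair stays balanced on every path out of the loop.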