mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-15 10:17:20 +00:00
- Allow witness_sleep() to be called when witness hasn't been initialized
  yet.  We just return without performing any checks.
- Don't explicitly enter and exit critical sections when walking lock
  lists.  We don't need a critical section to walk the list of sleep
  locks for a thread.  We check to see if a spin lock list is empty
  before we walk it.  If the list is empty, we don't need to walk it.
  If it isn't, then we already hold at least one spin lock and are
  already in a critical section, and thus don't need our own explicit
  critical section.
This commit is contained in:
parent
42e498655d
commit
bbd296aba6
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=97006
@ -882,14 +882,9 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
|
||||
struct thread *td;
|
||||
int i, n;
|
||||
|
||||
if (witness_dead || panicstr != NULL)
|
||||
if (witness_cold || witness_dead || panicstr != NULL)
|
||||
return (0);
|
||||
KASSERT(!witness_cold, ("%s: witness_cold", __func__));
|
||||
n = 0;
|
||||
/*
|
||||
* Preemption bad because we need PCPU_PTR(spinlocks) to not change.
|
||||
*/
|
||||
critical_enter();
|
||||
td = curthread;
|
||||
lock_list = &td->td_sleeplocks;
|
||||
again:
|
||||
@ -916,7 +911,11 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
|
||||
lock1->li_lock->lo_name, lock1->li_file,
|
||||
lock1->li_line);
|
||||
}
|
||||
if (lock_list == &td->td_sleeplocks) {
|
||||
if (lock_list == &td->td_sleeplocks && PCPU_GET(spinlocks) != NULL) {
|
||||
/*
|
||||
* Since we already hold a spinlock preemption is
|
||||
* already blocked.
|
||||
*/
|
||||
lock_list = PCPU_PTR(spinlocks);
|
||||
goto again;
|
||||
}
|
||||
@ -924,7 +923,6 @@ witness_sleep(int check_only, struct lock_object *lock, const char *file,
|
||||
if (witness_ddb && n)
|
||||
Debugger(__func__);
|
||||
#endif /* DDB */
|
||||
critical_exit();
|
||||
return (n);
|
||||
}
|
||||
|
||||
@ -1346,15 +1344,9 @@ witness_list(struct thread *td)
|
||||
* out from under us. It is probably best to just not try to handle
|
||||
* threads on other CPU's for now.
|
||||
*/
|
||||
if (td == curthread) {
|
||||
/*
|
||||
* Preemption bad because we need PCPU_PTR(spinlocks) to not
|
||||
* change.
|
||||
*/
|
||||
critical_enter();
|
||||
if (td == curthread && PCPU_GET(spinlocks) != NULL)
|
||||
nheld += witness_list_locks(PCPU_PTR(spinlocks));
|
||||
critical_exit();
|
||||
}
|
||||
|
||||
return (nheld);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user