
Really, no explicit checks against lock_class_* objects should be done in
consumer code: using lock properties is much more appropriate.
Fix the current code doing these bogus checks.

Note: Strictly speaking, callouts are not usable with every
!(LC_SPINLOCK | LC_SLEEPABLE) primitive; rmlocks, for example, do not
implement the generic lock layer functions. They could be equipped to do so,
however, so the check is still valid.

Tested by: matteo, kris (earlier version)
Reviewed by: jhb
Attilio Rao 2008-02-06 00:04:09 +00:00
parent 7a21dee3c2
commit 13ddf72de7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=176013
2 changed files with 4 additions and 4 deletions
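
To illustrate the principle from the commit message before the hunks below, the sketch here contrasts the fragile class-pointer style with the property-based style. It is only a sketch: it relies on the standard <sys/lock.h> definitions of LOCK_CLASS(), lc_flags and the LC_* flags, and the consumer function example_set_lock() is hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>

/*
 * Hypothetical consumer that cannot work with spin locks or with locks
 * that may be held while sleeping.
 */
static void
example_set_lock(struct lock_object *lo)
{

	/*
	 * Fragile style: enumerating compatible classes by address breaks
	 * as soon as another suitable lock class is introduced.
	 */
	KASSERT(LOCK_CLASS(lo) == &lock_class_mtx_sleep ||
	    LOCK_CLASS(lo) == &lock_class_rw,
	    ("%s: unexpected lock class", __func__));

	/*
	 * Preferred style: assert on the properties the consumer actually
	 * relies on, so any class without LC_SPINLOCK and LC_SLEEPABLE is
	 * accepted automatically.
	 */
	KASSERT((LOCK_CLASS(lo)->lc_flags & (LC_SPINLOCK | LC_SLEEPABLE)) == 0,
	    ("%s: invalid lock class", __func__));
}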


@@ -639,8 +639,8 @@ _callout_init_lock(c, lock, flags)
 	    ("callout_init_lock: bad flags %d", flags));
 	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
 	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
-	KASSERT(lock == NULL || LOCK_CLASS(lock) == &lock_class_mtx_sleep ||
-	    LOCK_CLASS(lock) == &lock_class_rw, ("%s: invalid lock class",
+	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
+	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
 	    __func__));
 	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
 }
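
For context, consumers normally reach _callout_init_lock() through the callout_init_mtx() and callout_init_rw() wrappers in <sys/callout.h>. The usage sketch below uses a default (non-spin) mutex, whose lock class sets neither LC_SPINLOCK nor LC_SLEEPABLE and therefore passes the relaxed assertion above; the softc layout and all example_* names are invented.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

/* Invented driver state; field and function names are illustrative only. */
struct example_softc {
	struct mtx	sc_mtx;
	struct callout	sc_callout;
};

static void
example_timeout(void *arg)
{
	struct example_softc *sc = arg;

	/* Runs with sc_mtx held because the callout was mtx-initialized. */
	(void)sc;
}

static void
example_attach(struct example_softc *sc)
{

	mtx_init(&sc->sc_mtx, "example", NULL, MTX_DEF);
	/*
	 * lock_class_mtx_sleep carries neither LC_SPINLOCK nor LC_SLEEPABLE,
	 * so the relaxed KASSERT in _callout_init_lock() accepts the lock.
	 */
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);

	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_callout, hz, example_timeout, sc);
	mtx_unlock(&sc->sc_mtx);
}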


@@ -494,7 +494,7 @@ lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
 	if (lock_prof_skipcount &&
 	    (++lock_prof_count % lock_prof_skipcount) != 0)
 		return;
-	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	if (spin && lock_prof_skipspin == 1)
 		return;
 	l = lock_profile_object_lookup(lo, spin, file, line);
@@ -523,7 +523,7 @@ lock_profile_release_lock(struct lock_object *lo)
 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
 		return;
-	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
+	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 	head = &curthread->td_lprof[spin];
 	critical_enter();
 	LIST_FOREACH(l, head, lpo_link)
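
In both lock_profile hunks the trailing ?: is deliberate: the old pointer comparison already evaluated to 0 or 1, whereas lc_flags & LC_SPINLOCK evaluates to the flag's mask value, and spin is used directly as the index into curthread->td_lprof. A small user-space sketch of that normalization follows; the LC_SPINLOCK value is assumed to match <sys/lock.h>, and the harness around it is invented.

#include <assert.h>

#define	LC_SPINLOCK	0x00000002	/* assumed to match <sys/lock.h> */

int
main(void)
{
	/* A spin-lock class also carrying some other, unrelated flag. */
	unsigned int lc_flags = LC_SPINLOCK | 0x00000008;

	/* Raw mask test: nonzero, but not a valid index into a 2-slot array. */
	int raw = lc_flags & LC_SPINLOCK;

	/* Normalized to exactly 0 or 1, usable as td_lprof[spin]. */
	int spin = (lc_flags & LC_SPINLOCK) ? 1 : 0;

	assert(raw == LC_SPINLOCK);
	assert(spin == 1);
	return (0);
}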