
Turn comments about locking into actual lock assertions.

Reviewed by:	ken
Tested by:	ken
MFC after:	1 month
Edward Tomasz Napierala 2013-08-15 20:00:32 +00:00
parent cd234300d3
commit da4757e06b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=254378
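
The change is mechanical: wherever a comment stated that the caller must (or must not) hold ctl_lock, the comment is removed and an mtx_assert(9) call is placed at the top of the function, so the locking rule is checked at run time on kernels built with INVARIANTS and compiles away otherwise. A minimal sketch of the pattern, using a hypothetical foo_softc structure rather than the CTL code itself:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo_softc {
	struct mtx	foo_lock;
	int		foo_count;
};

/*
 * Before: the rule "caller must hold foo_lock" lived only in a comment.
 * After: it is enforced by an assertion when INVARIANTS is compiled in.
 */
static void
foo_bump(struct foo_softc *sc)
{

	mtx_assert(&sc->foo_lock, MA_OWNED);	/* panic if the caller forgot the lock */

	sc->foo_count++;
}

static void
foo_bump_unlocked(struct foo_softc *sc)
{

	mtx_assert(&sc->foo_lock, MA_NOTOWNED);	/* this path acquires the lock itself */

	mtx_lock(&sc->foo_lock);
	foo_bump(sc);
	mtx_unlock(&sc->foo_lock);
}

A side effect visible in the first two hunks below: ctl_init() now has to take ctl_lock around the ctl_pool_acquire() and ctl_pool_free() calls, since those functions now assert that the lock is owned.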

sys/cam/ctl/ctl.c

@@ -1070,9 +1070,11 @@ ctl_init(void)
 	softc->emergency_pool = emergency_pool;
 	softc->othersc_pool = other_pool;
 
+	mtx_lock(&softc->ctl_lock);
 	ctl_pool_acquire(internal_pool);
 	ctl_pool_acquire(emergency_pool);
 	ctl_pool_acquire(other_pool);
+	mtx_unlock(&softc->ctl_lock);
 
 	/*
 	 * We used to allocate a processor LUN here. The new scheme is to
@@ -1088,10 +1090,12 @@ ctl_init(void)
 	    "ctl_thrd");
 	if (error != 0) {
 		printf("error creating CTL work thread!\n");
+		mtx_lock(&softc->ctl_lock);
 		ctl_free_lun(lun);
 		ctl_pool_free(softc, internal_pool);
 		ctl_pool_free(softc, emergency_pool);
 		ctl_pool_free(softc, other_pool);
+		mtx_unlock(&softc->ctl_lock);
 		return (error);
 	}
 	printf("ctl: CAM Target Layer loaded\n");
@@ -1372,7 +1376,6 @@ ctl_ioctl_offline(void *arg)
 /*
  * Remove an initiator by port number and initiator ID.
  * Returns 0 for success, 1 for failure.
- * Assumes the caller does NOT hold the CTL lock.
  */
 int
 ctl_remove_initiator(int32_t targ_port, uint32_t iid)
@@ -1381,6 +1384,8 @@ ctl_remove_initiator(int32_t targ_port, uint32_t iid)
 
 	softc = control_softc;
 
+	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
+
 	if ((targ_port < 0)
 	 || (targ_port > CTL_MAX_PORTS)) {
 		printf("%s: invalid port number %d\n", __func__, targ_port);
@@ -1404,7 +1409,6 @@ ctl_remove_initiator(int32_t targ_port, uint32_t iid)
 /*
  * Add an initiator to the initiator map.
  * Returns 0 for success, 1 for failure.
- * Assumes the caller does NOT hold the CTL lock.
  */
 int
 ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
@@ -1414,6 +1418,8 @@ ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
 
 	softc = control_softc;
 
+	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
+
 	retval = 0;
 
 	if ((targ_port < 0)
@@ -1970,7 +1976,6 @@ ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
 }
 
 /*
- * Must be called with the ctl_lock held.
  * Returns 0 for success, errno for failure.
  */
 static int
@@ -1982,6 +1987,8 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
 
 	retval = 0;
 
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
 	    (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
 	    ooa_links)) {
@@ -3395,12 +3402,12 @@ ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
 	return (retval);
 }
 
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
 int
 ctl_pool_acquire(struct ctl_io_pool *pool)
 {
+
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	if (pool == NULL)
 		return (-EINVAL);
 
@@ -3412,12 +3419,12 @@ ctl_pool_acquire(struct ctl_io_pool *pool)
 	return (0);
 }
 
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
 int
 ctl_pool_invalidate(struct ctl_io_pool *pool)
 {
+
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	if (pool == NULL)
 		return (-EINVAL);
 
@@ -3426,12 +3433,12 @@ ctl_pool_invalidate(struct ctl_io_pool *pool)
 	return (0);
 }
 
-/*
- * Caller must hold ctl_softc->ctl_lock.
- */
 int
 ctl_pool_release(struct ctl_io_pool *pool)
 {
+
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	if (pool == NULL)
 		return (-EINVAL);
 
@@ -3443,14 +3450,13 @@ ctl_pool_release(struct ctl_io_pool *pool)
 	return (0);
 }
 
-/*
- * Must be called with ctl_softc->ctl_lock held.
- */
 void
 ctl_pool_free(struct ctl_softc *ctl_softc, struct ctl_io_pool *pool)
 {
 	union ctl_io *cur_io, *next_io;
 
+	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
+
 	for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
 	     cur_io != NULL; cur_io = next_io) {
 		next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
@@ -4392,7 +4398,6 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
 /*
  * Delete a LUN.
  * Assumptions:
- * - caller holds ctl_softc->ctl_lock.
  * - LUN has already been marked invalid and any pending I/O has been taken
  *   care of.
  */
@@ -4409,6 +4414,8 @@ ctl_free_lun(struct ctl_lun *lun)
 
 	softc = lun->ctl_softc;
 
+	mtx_assert(&softc->ctl_lock, MA_OWNED);
+
 	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
 
 	ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
@@ -9772,7 +9779,6 @@ ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
 /*
  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
  * Assumptions:
- * - caller holds ctl_lock
  * - pending_io is generally either incoming, or on the blocked queue
  * - starting I/O is the I/O we want to start the check with.
  */
@@ -9783,6 +9789,8 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
 	union ctl_io *ooa_io;
 	ctl_action action;
 
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	/*
 	 * Run back along the OOA queue, starting with the current
 	 * blocked I/O and going through every I/O before it on the
@@ -9823,13 +9831,14 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
  * Assumptions:
  * - An I/O has just completed, and has been removed from the per-LUN OOA
  *   queue, so some items on the blocked queue may now be unblocked.
- * - The caller holds ctl_softc->ctl_lock
  */
 static int
 ctl_check_blocked(struct ctl_lun *lun)
 {
 	union ctl_io *cur_blocked, *next_blocked;
 
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	/*
 	 * Run forward from the head of the blocked queue, checking each
 	 * entry against the I/Os prior to it on the OOA queue to see if
@@ -10893,8 +10902,6 @@ ctl_abort_task(union ctl_io *io)
 }
 
 /*
- * Assumptions: caller holds ctl_softc->ctl_lock
- *
  * This routine cannot block! It must be callable from an interrupt
  * handler as well as from the work thread.
  */
@@ -10903,6 +10910,8 @@ ctl_run_task_queue(struct ctl_softc *ctl_softc)
 {
 	union ctl_io *io, *next_io;
 
+	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
+
 	CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
 
 	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
@@ -11204,14 +11213,13 @@ ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
 	return (filtered_pattern);
 }
 
-/*
- * Called with the CTL lock held.
- */
 static void
 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
 {
 	struct ctl_error_desc *desc, *desc2;
 
+	mtx_assert(&control_softc->ctl_lock, MA_OWNED);
+
 	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
 		ctl_lun_error_pattern pattern;
 		/*
@@ -11281,14 +11289,13 @@ ctl_datamove_timer_wakeup(void *arg)
 }
 #endif /* CTL_IO_DELAY */
 
-/*
- * Assumption: caller does NOT hold ctl_lock
- */
 void
 ctl_datamove(union ctl_io *io)
 {
 	void (*fe_datamove)(union ctl_io *io);
 
+	mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
+
 	CTL_DEBUG_PRINT(("ctl_datamove\n"));
 
 #ifdef CTL_TIME_IO
@@ -12134,8 +12141,6 @@ ctl_datamove_remote_read(union ctl_io *io)
  * first. Once that is complete, the data gets DMAed into the remote
  * controller's memory. For reads, we DMA from the remote controller's
  * memory into our memory first, and then move it out to the FETD.
- *
- * Should be called without the ctl_lock held.
  */
 static void
 ctl_datamove_remote(union ctl_io *io)
@@ -12144,6 +12149,8 @@ ctl_datamove_remote(union ctl_io *io)
 
 	softc = control_softc;
 
+	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
+
 	/*
 	 * Note that we look for an aborted I/O here, but don't do some of
	 * the other checks that ctl_datamove() normally does. We don't