diff --git a/alua.c b/alua.c
index 096b6462..656a28f0 100644
--- a/alua.c
+++ b/alua.c
@@ -432,12 +432,12 @@ static int alua_sync_state(struct tcmu_device *dev,
 			 * the first command sent to us so clear
 			 * lock state to avoid later blacklist errors.
 			 */
-			pthread_mutex_lock(&rdev->state_lock);
+			pthread_mutex_lock(&rdev->rdev_lock);
 			if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
 				tcmu_dev_dbg(dev, "Dropping lock\n");
 				rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
 			}
-			pthread_mutex_unlock(&rdev->state_lock);
+			pthread_mutex_unlock(&rdev->rdev_lock);
 		}
 	}
 
@@ -560,7 +560,7 @@ int alua_implicit_transition(struct tcmu_device *dev, struct tcmulib_cmd *cmd,
 	if (!lock_is_required(dev))
 		return ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
 		/* For both read/write cases in this state is good */
 		goto done;
@@ -617,7 +617,7 @@ int alua_implicit_transition(struct tcmu_device *dev, struct tcmulib_cmd *cmd,
 	}
 
 done:
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 	return ret;
 }
diff --git a/main.c b/main.c
index 44a08e01..70b42335 100644
--- a/main.c
+++ b/main.c
@@ -615,14 +615,14 @@ static void tcmur_stop_device(void *arg)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	bool is_open = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	/* check if this was already called due to thread cancelation */
 	if (rdev->flags & TCMUR_DEV_FLAG_STOPPED) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return;
 	}
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The lock thread can fire off the recovery thread, so make sure
@@ -633,19 +633,19 @@ static void tcmur_stop_device(void *arg)
 
 	tcmu_release_dev_lock(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN) {
 		rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
 		is_open = true;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	if (is_open)
 		rhandler->close(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPED;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	tcmu_dev_dbg(dev, "cmdproc cleanup done\n");
 }
@@ -681,7 +681,7 @@ static bool get_next_cmd_timeout(struct tcmu_device *dev,
 
 	memset(tmo, 0, sizeof(*tmo));
 
-	pthread_spin_lock(&rdev->lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 	list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
 		if (tcmur_cmd->timed_out)
 			continue;
@@ -705,7 +705,7 @@ static bool get_next_cmd_timeout(struct tcmu_device *dev,
 			(intmax_t)curr_time->tv_sec, (intmax_t)tcmur_cmd->start_time.tv_sec);
 		break;
 	}
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 
 	return has_timeout;
 }
@@ -728,7 +728,7 @@ static void check_for_timed_out_cmds(struct tcmu_device *dev)
 	if (tcmur_get_time(dev, &curr_time))
 		return;
 
-	pthread_spin_lock(&rdev->lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 	list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
 		if (tcmur_cmd->timed_out)
 			continue;
@@ -758,7 +758,7 @@ static void check_for_timed_out_cmds(struct tcmu_device *dev)
 		 */
 		tcmu_notify_cmd_timed_out(dev);
 	}
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 }
 
 static void tcmur_tcmulib_cmd_start(struct tcmu_device *dev,
@@ -775,9 +775,9 @@ static void tcmur_tcmulib_cmd_start(struct tcmu_device *dev,
 
 	if (rdev->cmd_time_out) {
 		tcmur_cmd->start_time.tv_sec = curr_time->tv_sec;
-		pthread_spin_lock(&rdev->lock);
+		pthread_spin_lock(&rdev->cmds_list_lock);
 		list_add_tail(&rdev->cmds_list, &tcmur_cmd->cmds_list_entry);
-		pthread_spin_unlock(&rdev->lock);
+		pthread_spin_unlock(&rdev->cmds_list_lock);
 	}
 }
 
@@ -872,10 +872,10 @@ static void *tcmur_cmdproc_thread(void *arg)
 		 * requests that LIO has completed. We only need to wait for replies
 		 * for outstanding requests so throttle the cmdproc thread now.
 		 */
-		pthread_mutex_lock(&rdev->state_lock);
+		pthread_mutex_lock(&rdev->rdev_lock);
 		if (rdev->flags & TCMUR_DEV_FLAG_STOPPING)
 			dev_stopping = true;
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 	}
 
 	/*
@@ -1020,7 +1020,7 @@ static int dev_added(struct tcmu_device *dev)
 	tcmu_dev_dbg(dev, "Got block_size %d, size in bytes %"PRId64"\n",
 		     block_size, dev_size);
 
-	ret = pthread_spin_init(&rdev->lock, 0);
+	ret = pthread_spin_init(&rdev->cmds_list_lock, 0);
 	if (ret) {
 		ret = -ret;
 		goto free_rdev;
@@ -1038,7 +1038,7 @@
 		goto cleanup_caw_lock;
 	}
 
-	ret = pthread_mutex_init(&rdev->state_lock, NULL);
+	ret = pthread_mutex_init(&rdev->rdev_lock, NULL);
 	if (ret) {
 		ret = -ret;
 		goto cleanup_format_lock;
@@ -1046,7 +1046,7 @@
 	}
 	ret = setup_io_work_queue(dev);
 	if (ret < 0)
-		goto cleanup_state_lock;
+		goto cleanup_rdev_lock;
 
 	ret = setup_aio_tracking(rdev);
 	if (ret < 0)
@@ -1088,14 +1088,14 @@
 	cleanup_aio_tracking(rdev);
cleanup_io_work_queue:
 	cleanup_io_work_queue(dev, true);
-cleanup_state_lock:
-	pthread_mutex_destroy(&rdev->state_lock);
+cleanup_rdev_lock:
+	pthread_mutex_destroy(&rdev->rdev_lock);
 cleanup_format_lock:
 	pthread_mutex_destroy(&rdev->format_lock);
 cleanup_caw_lock:
 	pthread_mutex_destroy(&rdev->caw_lock);
 cleanup_dev_lock:
-	pthread_spin_destroy(&rdev->lock);
+	pthread_spin_destroy(&rdev->cmds_list_lock);
 free_rdev:
 	free(rdev);
 	return ret;
@@ -1106,9 +1106,9 @@ static void dev_removed(struct tcmu_device *dev)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	int ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The order of cleaning up worker threads and calling ->removed()
@@ -1130,7 +1130,7 @@
 
 	tcmur_destroy_work(rdev->event_work);
 
-	ret = pthread_mutex_destroy(&rdev->state_lock);
+	ret = pthread_mutex_destroy(&rdev->rdev_lock);
 	if (ret != 0)
 		tcmu_err("could not cleanup state lock %d\n", ret);
 
@@ -1142,7 +1142,7 @@
 	if (ret != 0)
 		tcmu_err("could not cleanup caw lock %d\n", ret);
 
-	ret = pthread_spin_destroy(&rdev->lock);
+	ret = pthread_spin_destroy(&rdev->cmds_list_lock);
 	if (ret != 0)
 		tcmu_err("could not cleanup mailbox lock %d\n", ret);
diff --git a/target.c b/target.c
index 69c91120..51a0d97c 100644
--- a/target.c
+++ b/target.c
@@ -29,7 +29,7 @@ static struct list_head tpg_recovery_list = LIST_HEAD_INIT(tpg_recovery_list);
 
 /*
  * Locking ordering:
- *  rdev->state_lock
+ *  rdev->rdev_lock
  *  tpg_recovery_lock
 */
 static pthread_mutex_t tpg_recovery_lock = PTHREAD_MUTEX_INITIALIZER;
diff --git a/tcmur_cmd_handler.c b/tcmur_cmd_handler.c
index 2b97fea7..01d6a109 100644
--- a/tcmur_cmd_handler.c
+++ b/tcmur_cmd_handler.c
@@ -40,8 +40,8 @@ void tcmur_tcmulib_cmd_complete(struct tcmu_device *dev,
 	struct tcmur_cmd *tcmur_cmd = cmd->hm_private;
 	struct timespec curr_time;
 
-	pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->lock);
-	pthread_spin_lock(&rdev->lock);
+	pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->cmds_list_lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 
 	if (tcmur_cmd->timed_out) {
 		if (tcmur_get_time(dev, &curr_time)) {
@@ -60,7 +60,7 @@ void tcmur_tcmulib_cmd_complete(struct tcmu_device *dev,
 
 	tcmulib_command_complete(dev, cmd, rc);
 
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 	pthread_cleanup_pop(0);
 }
 
@@ -2329,9 +2329,9 @@ void tcmur_set_pending_ua(struct tcmu_device *dev, int ua)
 {
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->pending_uas |= (1 << ua);
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 /*
@@ -2348,7 +2348,7 @@ static int handle_pending_ua(struct tcmur_device *rdev, struct tcmulib_cmd *cmd)
 		/* The kernel will handle REPORT_LUNS */
 		return TCMU_STS_NOT_HANDLED;
 	}
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 
 	if (!rdev->pending_uas) {
 		ret = TCMU_STS_NOT_HANDLED;
@@ -2364,7 +2364,7 @@ static int handle_pending_ua(struct tcmur_device *rdev, struct tcmulib_cmd *cmd)
 	rdev->pending_uas &= ~(1 << ua);
 
 unlock:
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 	return ret;
 }
 
diff --git a/tcmur_device.c b/tcmur_device.c
index 562e6950..09b29ba9 100644
--- a/tcmur_device.c
+++ b/tcmur_device.c
@@ -29,10 +29,10 @@ bool tcmu_dev_in_recovery(struct tcmu_device *dev)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	int in_recov = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY)
 		in_recov = true;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	return in_recov;
 }
@@ -46,12 +46,12 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 	int ret, attempt = 0;
 	bool needs_close = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_STOPPING) {
 		ret = 0;
 		goto done;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * There are no SCSI commands running but there may be
@@ -60,11 +60,11 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 	 */
 	tcmur_flush_work(rdev->event_work);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)
 		needs_close = true;
 	rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	if (pthread_self() != rdev->cmdproc_thread)
 		/*
@@ -77,7 +77,7 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 	tcmu_dev_dbg(dev, "Waiting for outstanding commands to complete\n");
 	ret = aio_wait_for_empty_queue(rdev);
 	if (ret) {
-		pthread_mutex_lock(&rdev->state_lock);
+		pthread_mutex_lock(&rdev->rdev_lock);
 		goto done;
 	}
 
@@ -86,11 +86,11 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 		rhandler->close(dev);
 	}
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	ret = -EIO;
 	while (ret != 0 && !(rdev->flags & TCMUR_DEV_FLAG_STOPPING) &&
 	       (retries < 0 || attempt <= retries)) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 
 		tcmu_dev_dbg(dev, "Opening device. Attempt %d\n", attempt);
 		ret = rhandler->open(dev, true);
@@ -99,7 +99,7 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 		sleep(1);
 	}
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (!ret) {
 		rdev->flags |= TCMUR_DEV_FLAG_IS_OPEN;
 		rdev->lock_lost = false;
@@ -109,7 +109,7 @@ int __tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 
 done:
 	rdev->flags &= ~TCMUR_DEV_FLAG_IN_RECOVERY;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	return ret;
 }
@@ -123,13 +123,13 @@ int tcmu_reopen_dev(struct tcmu_device *dev, int retries)
 {
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return -EBUSY;
 	}
 	rdev->flags |= TCMUR_DEV_FLAG_IN_RECOVERY;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	return __tcmu_reopen_dev(dev, retries);
 }
@@ -144,13 +144,13 @@ void tcmu_cancel_recovery(struct tcmu_device *dev)
 	 * handlers to fail/complete normally to avoid a segfault.
 	 */
 	tcmu_dev_dbg(dev, "Waiting on recovery thread\n");
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	while (rdev->flags & TCMUR_DEV_FLAG_IN_RECOVERY) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		sleep(1);
-		pthread_mutex_lock(&rdev->state_lock);
+		pthread_mutex_lock(&rdev->rdev_lock);
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	tcmu_dev_dbg(dev, "Recovery thread wait done\n");
 }
@@ -167,11 +167,11 @@ static void __tcmu_report_event(void *data)
 	 */
 	sleep(1);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	ret = rhandler->report_event(dev);
 	if (ret)
 		tcmu_dev_err(dev, "Could not report events. Error %d.\n", ret);
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 static void tcmu_report_event(struct tcmu_device *dev)
@@ -236,9 +236,9 @@ void tcmu_notify_conn_lost(struct tcmu_device *dev)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	bool report;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	report =__tcmu_notify_conn_lost(dev);
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	if (report)
 		tcmu_report_event(dev);
@@ -267,7 +267,7 @@ void tcmu_notify_lock_lost(struct tcmu_device *dev)
 {
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	tcmu_dev_warn(dev, "Async lock drop. Old state %d\n", rdev->lock_state);
 	/*
 	 * We could be getting stale IO completions. If we are trying to
@@ -276,7 +276,7 @@
 	if (rdev->lock_state != TCMUR_DEV_LOCK_WRITE_LOCKING) {
 		__tcmu_notify_lock_lost(dev);
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 void tcmu_release_dev_lock(struct tcmu_device *dev)
@@ -285,20 +285,20 @@
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	int ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->lock_state != TCMUR_DEV_LOCK_WRITE_LOCKED) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return;
 	}
 
 	if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
 		tcmu_dev_dbg(dev, "Device is closed so unlock is not needed\n");
 		rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	ret = rhandler->unlock(dev);
 	if (ret != TCMU_STS_OK)
@@ -309,9 +309,9 @@
 	 * to unlocked to prevent new IO from executing in case the lock
 	 * is in a state where it cannot be fenced.
 	 */
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 int tcmu_get_lock_tag(struct tcmu_device *dev, uint16_t *tag)
@@ -323,16 +323,16 @@
 	if (rdev->failover_type != TCMUR_DEV_FAILOVER_EXPLICIT)
 		return 0;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
 		/*
 		 * Return tmp error until the recovery thread is able to
 		 * start up.
 		 */
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return TCMU_STS_BUSY;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
retry:
 	ret = rhandler->get_lock_tag(dev, tag);
@@ -415,10 +415,10 @@ int tcmu_acquire_dev_lock(struct tcmu_device *dev, uint16_t tag)
 	 */
 	reopen = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->lock_lost || !(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN))
 		reopen = true;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
retry:
 	tcmu_dev_dbg(dev, "lock call state %d retries %d. tag %hu reopen %d\n",
@@ -436,13 +436,13 @@
 		}
 	}
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->lock_state == TCMUR_DEV_LOCK_READ_LOCKING) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		ret = TCMU_STS_OK;
 		goto done;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	ret = rhandler->lock(dev, tag);
 	if (ret == TCMU_STS_FENCED) {
@@ -476,7 +476,7 @@
 	tcmu_dev_flush_ring(dev);
 
 	/* TODO: set UA based on bgly's patches */
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (ret != TCMU_STS_OK) {
 		rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
 		tcmu_dev_info(dev, "Lock acquisition unsuccessful\n");
@@ -498,7 +498,7 @@
 
 	tcmu_cfgfs_dev_exec_action(dev, "block_dev", 0);
 
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	return ret;
 }
@@ -512,23 +512,23 @@ void tcmu_update_dev_lock_state(struct tcmu_device *dev)
 	if (!rhandler->get_lock_state)
 		return;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (!(rdev->flags & TCMUR_DEV_FLAG_IS_OPEN)) {
 		tcmu_dev_dbg(dev, "device closed.\n");
 		state = TCMUR_DEV_LOCK_UNKNOWN;
 		goto check_state;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	state = rhandler->get_lock_state(dev);
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
check_state:
 	if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED &&
 	    state != TCMUR_DEV_LOCK_WRITE_LOCKED) {
 		tcmu_dev_dbg(dev, "Updated out of sync lock state.\n");
 		__tcmu_notify_lock_lost(dev);
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 void tcmur_dev_set_private(struct tcmu_device *dev, void *private)
@@ -549,10 +549,10 @@ void tcmu_notify_cmd_timed_out(struct tcmu_device *dev)
 {
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->cmd_timed_out_cnt++;
 	__tcmu_notify_conn_lost(dev);
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	tcmu_report_event(dev);
 }
diff --git a/tcmur_device.h b/tcmur_device.h
index 62151dad..cd37aabe 100644
--- a/tcmur_device.h
+++ b/tcmur_device.h
@@ -48,6 +48,9 @@ struct tcmur_device {
 
 	pthread_t cmdproc_thread;
 
+	/* General lock for the members from "flags" to "pending_uas" */
+	pthread_mutex_t rdev_lock;
+
 	/* TCMUR_DEV flags */
 	uint32_t flags;
 	uint8_t failover_type;
@@ -63,8 +66,6 @@ struct tcmur_device {
 	bool lock_lost;
 	uint8_t lock_state;
 
-	/* General lock for lock state, thread, dev state, etc */
-	pthread_mutex_t state_lock;
 	int pending_uas;
 
 	/*
@@ -75,13 +76,14 @@ struct tcmur_device {
 	struct tcmu_io_queue work_queue;
 	struct tcmu_track_aio track_queue;
-	pthread_spinlock_t lock; /* protects concurrent updates to mailbox */
 	pthread_mutex_t caw_lock; /* for atomic CAW operation */
 
 	uint32_t format_progress;
 	pthread_mutex_t format_lock; /* for atomic format operations */
 
 	int cmd_time_out;
+
+	pthread_spinlock_t cmds_list_lock; /* protects cmds_list */
 	struct list_head cmds_list;
 };
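
The patch is easier to review against a small model of the idiom it enforces. Below is a minimal, self-contained sketch — not tcmu-runner code; the struct, flag bit, list linkage, and helpers are simplified stand-ins — of the convention the rename makes explicit: each lock is named for the data it guards (rdev_lock for the general device state, cmds_list_lock for the command list), so a reader can tell at a glance which lock a critical section should hold.

/*
 * Standalone sketch of the lock-naming convention (assumes only POSIX
 * threads; build with -lpthread). Member names mirror the patch;
 * everything else is illustrative.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct cmd {
	struct cmd *next;	/* simplified stand-in for the cmds_list linkage */
	int timed_out;
};

struct rdev {
	/* General lock for device state (flags, pending UAs, ...) */
	pthread_mutex_t rdev_lock;
	uint32_t flags;

	/* cmds_list_lock protects cmds_list and nothing else */
	pthread_spinlock_t cmds_list_lock;
	struct cmd *cmds_list;
};

#define DEV_FLAG_STOPPING (1 << 0)

/* State transitions take rdev_lock. */
static void set_stopping(struct rdev *rdev)
{
	pthread_mutex_lock(&rdev->rdev_lock);
	rdev->flags |= DEV_FLAG_STOPPING;
	pthread_mutex_unlock(&rdev->rdev_lock);
}

/* List walks take only the spinlock, keeping the critical section short. */
static int count_timed_out(struct rdev *rdev)
{
	struct cmd *cmd;
	int n = 0;

	pthread_spin_lock(&rdev->cmds_list_lock);
	for (cmd = rdev->cmds_list; cmd; cmd = cmd->next)
		if (cmd->timed_out)
			n++;
	pthread_spin_unlock(&rdev->cmds_list_lock);

	return n;
}

int main(void)
{
	struct rdev rdev = { .cmds_list = NULL };

	pthread_mutex_init(&rdev.rdev_lock, NULL);
	pthread_spin_init(&rdev.cmds_list_lock, 0);

	set_stopping(&rdev);
	printf("timed out commands: %d\n", count_timed_out(&rdev));

	pthread_spin_destroy(&rdev.cmds_list_lock);
	pthread_mutex_destroy(&rdev.rdev_lock);
	return 0;
}

The same pairing is visible in the tcmur_device.h hunk above: the patch moves the spinlock declaration next to the cmds_list it guards and documents which members rdev_lock covers, so the association lives at the declaration site rather than in reviewers' heads.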