tcmur_device: rename lock names in tcmur_device struct #671

Merged
merged 2 commits on Sep 14, 2021
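The change is a mechanical rename inside struct tcmur_device: the mutex state_lock becomes rdev_lock (it guards more than just the lock state, e.g. flags and pending_uas), and the spinlock lock becomes cmds_list_lock (it guards the command list used for timeout tracking). A rough sketch of the affected fields follows; it assumes the usual declarations in the tcmur_device header, which is not part of this diff, and lists only members that appear in the hunks below:

#include <pthread.h>

/* Sketch only: fields not touched by this PR are elided; the list types
 * come from the project's ccan-style list helpers. */
struct tcmur_device {
	/* Guards cmds_list; renamed from "lock" in this PR. */
	pthread_spinlock_t cmds_list_lock;
	struct list_head cmds_list;

	pthread_mutex_t caw_lock;	/* serializes compare-and-write */
	pthread_mutex_t format_lock;	/* serializes format handling */

	/* Guards flags, lock_state and pending_uas; renamed from "state_lock". */
	pthread_mutex_t rdev_lock;
	unsigned int flags;		/* TCMUR_DEV_FLAG_* bits */
	int lock_state;			/* TCMUR_DEV_LOCK_* values */
	int pending_uas;		/* bitmask of queued unit attentions */

	/* ... remaining members unchanged by this PR ... */
};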
8 changes: 4 additions & 4 deletions alua.c
@@ -432,12 +432,12 @@ static int alua_sync_state(struct tcmu_device *dev,
 			 * the first command sent to us so clear
 			 * lock state to avoid later blacklist errors.
 			 */
-			pthread_mutex_lock(&rdev->state_lock);
+			pthread_mutex_lock(&rdev->rdev_lock);
 			if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
 				tcmu_dev_dbg(dev, "Dropping lock\n");
 				rdev->lock_state = TCMUR_DEV_LOCK_UNLOCKED;
 			}
-			pthread_mutex_unlock(&rdev->state_lock);
+			pthread_mutex_unlock(&rdev->rdev_lock);
 		}
 	}

@@ -560,7 +560,7 @@ int alua_implicit_transition(struct tcmu_device *dev, struct tcmulib_cmd *cmd,
 	if (!lock_is_required(dev))
 		return ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->lock_state == TCMUR_DEV_LOCK_WRITE_LOCKED) {
 		/* For both read/write cases in this state is good */
 		goto done;
@@ -617,7 +617,7 @@ int alua_implicit_transition(struct tcmu_device *dev, struct tcmulib_cmd *cmd,
 	}
 
 done:
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 	return ret;
 }

50 changes: 25 additions & 25 deletions main.c
@@ -615,14 +615,14 @@ static void tcmur_stop_device(void *arg)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	bool is_open = false;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	/* check if this was already called due to thread cancelation */
 	if (rdev->flags & TCMUR_DEV_FLAG_STOPPED) {
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 		return;
 	}
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The lock thread can fire off the recovery thread, so make sure
@@ -633,19 +633,19 @@ static void tcmur_stop_device(void *arg)
 
 	tcmu_release_dev_lock(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	if (rdev->flags & TCMUR_DEV_FLAG_IS_OPEN) {
 		rdev->flags &= ~TCMUR_DEV_FLAG_IS_OPEN;
 		is_open = true;
 	}
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	if (is_open)
 		rhandler->close(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPED;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	tcmu_dev_dbg(dev, "cmdproc cleanup done\n");
 }
@@ -681,7 +681,7 @@ static bool get_next_cmd_timeout(struct tcmu_device *dev,
 
 	memset(tmo, 0, sizeof(*tmo));
 
-	pthread_spin_lock(&rdev->lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 	list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
 		if (tcmur_cmd->timed_out)
 			continue;
@@ -705,7 +705,7 @@ static bool get_next_cmd_timeout(struct tcmu_device *dev,
 			     (intmax_t)curr_time->tv_sec, (intmax_t)tcmur_cmd->start_time.tv_sec);
 		break;
 	}
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 
 	return has_timeout;
 }
}
@@ -728,7 +728,7 @@ static void check_for_timed_out_cmds(struct tcmu_device *dev)
 	if (tcmur_get_time(dev, &curr_time))
 		return;
 
-	pthread_spin_lock(&rdev->lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 	list_for_each(&rdev->cmds_list, tcmur_cmd, cmds_list_entry) {
 		if (tcmur_cmd->timed_out)
 			continue;
@@ -758,7 +758,7 @@ static void check_for_timed_out_cmds(struct tcmu_device *dev)
 		 */
 		tcmu_notify_cmd_timed_out(dev);
 	}
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 }
 
 static void tcmur_tcmulib_cmd_start(struct tcmu_device *dev,
@@ -775,9 +775,9 @@ static void tcmur_tcmulib_cmd_start(struct tcmu_device *dev,
 	if (rdev->cmd_time_out) {
 		tcmur_cmd->start_time.tv_sec = curr_time->tv_sec;
 
-		pthread_spin_lock(&rdev->lock);
+		pthread_spin_lock(&rdev->cmds_list_lock);
 		list_add_tail(&rdev->cmds_list, &tcmur_cmd->cmds_list_entry);
-		pthread_spin_unlock(&rdev->lock);
+		pthread_spin_unlock(&rdev->cmds_list_lock);
 	}
 }

@@ -872,10 +872,10 @@ static void *tcmur_cmdproc_thread(void *arg)
 		 * requests that LIO has completed. We only need to wait for replies
 		 * for outstanding requests so throttle the cmdproc thread now.
 		 */
-		pthread_mutex_lock(&rdev->state_lock);
+		pthread_mutex_lock(&rdev->rdev_lock);
 		if (rdev->flags & TCMUR_DEV_FLAG_STOPPING)
 			dev_stopping = true;
-		pthread_mutex_unlock(&rdev->state_lock);
+		pthread_mutex_unlock(&rdev->rdev_lock);
 	}
 
 	/*
@@ -1020,7 +1020,7 @@ static int dev_added(struct tcmu_device *dev)
 	tcmu_dev_dbg(dev, "Got block_size %d, size in bytes %"PRId64"\n",
 		     block_size, dev_size);
 
-	ret = pthread_spin_init(&rdev->lock, 0);
+	ret = pthread_spin_init(&rdev->cmds_list_lock, 0);
 	if (ret) {
 		ret = -ret;
 		goto free_rdev;
@@ -1038,15 +1038,15 @@ static int dev_added(struct tcmu_device *dev)
 		goto cleanup_caw_lock;
 	}
 
-	ret = pthread_mutex_init(&rdev->state_lock, NULL);
+	ret = pthread_mutex_init(&rdev->rdev_lock, NULL);
 	if (ret) {
 		ret = -ret;
 		goto cleanup_format_lock;
 	}
 
 	ret = setup_io_work_queue(dev);
 	if (ret < 0)
-		goto cleanup_state_lock;
+		goto cleanup_rdev_lock;
 
 	ret = setup_aio_tracking(rdev);
 	if (ret < 0)
@@ -1088,14 +1088,14 @@ static int dev_added(struct tcmu_device *dev)
 	cleanup_aio_tracking(rdev);
 cleanup_io_work_queue:
 	cleanup_io_work_queue(dev, true);
-cleanup_state_lock:
-	pthread_mutex_destroy(&rdev->state_lock);
+cleanup_rdev_lock:
+	pthread_mutex_destroy(&rdev->rdev_lock);
 cleanup_format_lock:
 	pthread_mutex_destroy(&rdev->format_lock);
 cleanup_caw_lock:
 	pthread_mutex_destroy(&rdev->caw_lock);
 cleanup_dev_lock:
-	pthread_spin_destroy(&rdev->lock);
+	pthread_spin_destroy(&rdev->cmds_list_lock);
 free_rdev:
 	free(rdev);
 	return ret;
@@ -1106,9 +1106,9 @@ static void dev_removed(struct tcmu_device *dev)
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 	int ret;
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->flags |= TCMUR_DEV_FLAG_STOPPING;
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 
 	/*
 	 * The order of cleaning up worker threads and calling ->removed()
@@ -1130,7 +1130,7 @@ static void dev_removed(struct tcmu_device *dev)
 
 	tcmur_destroy_work(rdev->event_work);
 
-	ret = pthread_mutex_destroy(&rdev->state_lock);
+	ret = pthread_mutex_destroy(&rdev->rdev_lock);
 	if (ret != 0)
 		tcmu_err("could not cleanup state lock %d\n", ret);
 
@@ -1142,7 +1142,7 @@ static void dev_removed(struct tcmu_device *dev)
 	if (ret != 0)
 		tcmu_err("could not cleanup caw lock %d\n", ret);
 
-	ret = pthread_spin_destroy(&rdev->lock);
+	ret = pthread_spin_destroy(&rdev->cmds_list_lock);
 	if (ret != 0)
 		tcmu_err("could not cleanup mailbox lock %d\n", ret);
 
2 changes: 1 addition & 1 deletion target.c
@@ -29,7 +29,7 @@
 static struct list_head tpg_recovery_list = LIST_HEAD_INIT(tpg_recovery_list);
 /*
  * Locking ordering:
- * rdev->state_lock
+ * rdev->rdev_lock
  *  tpg_recovery_lock
  */
 static pthread_mutex_t tpg_recovery_lock = PTHREAD_MUTEX_INITIALIZER;
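To make the documented ordering above concrete: any path that needs both locks must take rdev->rdev_lock before tpg_recovery_lock, never the reverse. A hypothetical helper respecting that hierarchy might look like this (the function name and body are illustrative, not code from this patch):

/* Illustrative only: outer device lock first, then the recovery-list lock. */
static void example_queue_tpg_recovery(struct tcmur_device *rdev)
{
	pthread_mutex_lock(&rdev->rdev_lock);
	pthread_mutex_lock(&tpg_recovery_lock);

	/* ... add the port group to tpg_recovery_list here ... */

	pthread_mutex_unlock(&tpg_recovery_lock);	/* release in reverse order */
	pthread_mutex_unlock(&rdev->rdev_lock);
}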
14 changes: 7 additions & 7 deletions tcmur_cmd_handler.c
@@ -40,8 +40,8 @@ void tcmur_tcmulib_cmd_complete(struct tcmu_device *dev,
 	struct tcmur_cmd *tcmur_cmd = cmd->hm_private;
 	struct timespec curr_time;
 
-	pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->lock);
-	pthread_spin_lock(&rdev->lock);
+	pthread_cleanup_push(_cleanup_spin_lock, (void *)&rdev->cmds_list_lock);
+	pthread_spin_lock(&rdev->cmds_list_lock);
 
 	if (tcmur_cmd->timed_out) {
 		if (tcmur_get_time(dev, &curr_time)) {
@@ -60,7 +60,7 @@ void tcmur_tcmulib_cmd_complete(struct tcmu_device *dev,
 
 	tcmulib_command_complete(dev, cmd, rc);
 
-	pthread_spin_unlock(&rdev->lock);
+	pthread_spin_unlock(&rdev->cmds_list_lock);
 	pthread_cleanup_pop(0);
 }

@@ -2329,9 +2329,9 @@ void tcmur_set_pending_ua(struct tcmu_device *dev, int ua)
 {
 	struct tcmur_device *rdev = tcmu_dev_get_private(dev);
 
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 	rdev->pending_uas |= (1 << ua);
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 }
 
 /*
/*
@@ -2348,7 +2348,7 @@ static int handle_pending_ua(struct tcmur_device *rdev, struct tcmulib_cmd *cmd)
 		/* The kernel will handle REPORT_LUNS */
 		return TCMU_STS_NOT_HANDLED;
 	}
-	pthread_mutex_lock(&rdev->state_lock);
+	pthread_mutex_lock(&rdev->rdev_lock);
 
 	if (!rdev->pending_uas) {
 		ret = TCMU_STS_NOT_HANDLED;
@@ -2364,7 +2364,7 @@ static int handle_pending_ua(struct tcmur_device *rdev, struct tcmulib_cmd *cmd)
 	rdev->pending_uas &= ~(1 << ua);
 
 unlock:
-	pthread_mutex_unlock(&rdev->state_lock);
+	pthread_mutex_unlock(&rdev->rdev_lock);
 	return ret;
 }
