Skip to content

Commit 61fb248

Browse files
BStroesser authored and martinkpetersen committed
scsi: target: tcmu: Userspace must not complete queued commands
When tcmu queues a new command - no matter whether in command ring or in qfull_queue - a cmd_id from IDR udev->commands is assigned to the command. If userspace sends a wrong command completion containing the cmd_id of a command on the qfull_queue, tcmu_handle_completions() finds the command in the IDR and calls tcmu_handle_completion() for it. This might do some nasty things because commands in qfull_queue do not have a valid dbi list. To fix this bug, we no longer add queued commands to the idr. Instead the cmd_id is assign when a command is written to the command ring. Due to this change I had to adapt the source code at several places where up to now an idr_for_each had been done. [mkp: fix checkpatch warnings] Link: https://lore.kernel.org/r/20200518164833.12775-1-bstroesser@ts.fujitsu.com Acked-by: Mike Christie <mchristi@redhat.com> Signed-off-by: Bodo Stroesser <bstroesser@ts.fujitsu.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1 parent 5482d56 commit 61fb248

File tree

1 file changed

+71
-83
lines changed

1 file changed

+71
-83
lines changed

drivers/target/target_core_user.c

Lines changed: 71 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -882,41 +882,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
882882
return command_size;
883883
}
884884

885-
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
886-
struct timer_list *timer)
885+
static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
886+
struct timer_list *timer)
887887
{
888-
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
889-
int cmd_id;
890-
891-
if (tcmu_cmd->cmd_id)
892-
goto setup_timer;
893-
894-
cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
895-
if (cmd_id < 0) {
896-
pr_err("tcmu: Could not allocate cmd id.\n");
897-
return cmd_id;
898-
}
899-
tcmu_cmd->cmd_id = cmd_id;
900-
901-
pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
902-
udev->name, tmo / MSEC_PER_SEC);
903-
904-
setup_timer:
905888
if (!tmo)
906-
return 0;
889+
return;
907890

908891
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
909892
if (!timer_pending(timer))
910893
mod_timer(timer, tcmu_cmd->deadline);
911894

912-
return 0;
895+
pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
896+
tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
913897
}
914898

915899
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
916900
{
917901
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
918902
unsigned int tmo;
919-
int ret;
920903

921904
/*
922905
* For backwards compat if qfull_time_out is not set use
@@ -931,13 +914,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
931914
else
932915
tmo = TCMU_TIME_OUT;
933916

934-
ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
935-
if (ret)
936-
return ret;
917+
tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
937918

938919
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
939-
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
940-
tcmu_cmd->cmd_id, udev->name);
920+
pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
921+
tcmu_cmd, udev->name);
941922
return 0;
942923
}
943924

@@ -959,7 +940,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
959940
struct tcmu_mailbox *mb;
960941
struct tcmu_cmd_entry *entry;
961942
struct iovec *iov;
962-
int iov_cnt, ret;
943+
int iov_cnt, cmd_id;
963944
uint32_t cmd_head;
964945
uint64_t cdb_off;
965946
bool copy_to_data_area;
@@ -1060,14 +1041,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
10601041
}
10611042
entry->req.iov_bidi_cnt = iov_cnt;
10621043

1063-
ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
1064-
&udev->cmd_timer);
1065-
if (ret) {
1066-
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1044+
cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
1045+
if (cmd_id < 0) {
1046+
pr_err("tcmu: Could not allocate cmd id.\n");
10671047

1048+
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
10681049
*scsi_err = TCM_OUT_OF_RESOURCES;
10691050
return -1;
10701051
}
1052+
tcmu_cmd->cmd_id = cmd_id;
1053+
1054+
pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1055+
tcmu_cmd, udev->name);
1056+
1057+
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1058+
10711059
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
10721060

10731061
/*
@@ -1279,50 +1267,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
12791267
return handled;
12801268
}
12811269

1282-
static int tcmu_check_expired_cmd(int id, void *p, void *data)
1270+
static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
12831271
{
1284-
struct tcmu_cmd *cmd = p;
1285-
struct tcmu_dev *udev = cmd->tcmu_dev;
1286-
u8 scsi_status;
12871272
struct se_cmd *se_cmd;
1288-
bool is_running;
1289-
1290-
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1291-
return 0;
12921273

12931274
if (!time_after(jiffies, cmd->deadline))
1294-
return 0;
1275+
return;
12951276

1296-
is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1277+
set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1278+
list_del_init(&cmd->queue_entry);
12971279
se_cmd = cmd->se_cmd;
1280+
cmd->se_cmd = NULL;
12981281

1299-
if (is_running) {
1300-
/*
1301-
* If cmd_time_out is disabled but qfull is set deadline
1302-
* will only reflect the qfull timeout. Ignore it.
1303-
*/
1304-
if (!udev->cmd_time_out)
1305-
return 0;
1282+
pr_debug("Timing out inflight cmd %u on dev %s.\n",
1283+
cmd->cmd_id, cmd->tcmu_dev->name);
13061284

1307-
set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1308-
/*
1309-
* target_complete_cmd will translate this to LUN COMM FAILURE
1310-
*/
1311-
scsi_status = SAM_STAT_CHECK_CONDITION;
1312-
list_del_init(&cmd->queue_entry);
1313-
cmd->se_cmd = NULL;
1314-
} else {
1315-
list_del_init(&cmd->queue_entry);
1316-
idr_remove(&udev->commands, id);
1317-
tcmu_free_cmd(cmd);
1318-
scsi_status = SAM_STAT_TASK_SET_FULL;
1319-
}
1285+
target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1286+
}
13201287

1321-
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1322-
id, udev->name, is_running ? "inflight" : "queued");
1288+
static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1289+
{
1290+
struct se_cmd *se_cmd;
13231291

1324-
target_complete_cmd(se_cmd, scsi_status);
1325-
return 0;
1292+
if (!time_after(jiffies, cmd->deadline))
1293+
return;
1294+
1295+
list_del_init(&cmd->queue_entry);
1296+
se_cmd = cmd->se_cmd;
1297+
tcmu_free_cmd(cmd);
1298+
1299+
pr_debug("Timing out queued cmd %p on dev %s.\n",
1300+
cmd, cmd->tcmu_dev->name);
1301+
1302+
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
13261303
}
13271304

13281305
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1407,16 +1384,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
14071384
return &udev->se_dev;
14081385
}
14091386

1410-
static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1387+
static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
14111388
{
14121389
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
14131390
LIST_HEAD(cmds);
1414-
bool drained = true;
14151391
sense_reason_t scsi_ret;
14161392
int ret;
14171393

14181394
if (list_empty(&udev->qfull_queue))
1419-
return true;
1395+
return;
14201396

14211397
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
14221398

@@ -1425,11 +1401,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
14251401
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
14261402
list_del_init(&tcmu_cmd->queue_entry);
14271403

1428-
pr_debug("removing cmd %u on dev %s from queue\n",
1429-
tcmu_cmd->cmd_id, udev->name);
1404+
pr_debug("removing cmd %p on dev %s from queue\n",
1405+
tcmu_cmd, udev->name);
14301406

14311407
if (fail) {
1432-
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
14331408
/*
14341409
* We were not able to even start the command, so
14351410
* fail with busy to allow a retry in case runner
@@ -1444,10 +1419,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
14441419

14451420
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
14461421
if (ret < 0) {
1447-
pr_debug("cmd %u on dev %s failed with %u\n",
1448-
tcmu_cmd->cmd_id, udev->name, scsi_ret);
1449-
1450-
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
1422+
pr_debug("cmd %p on dev %s failed with %u\n",
1423+
tcmu_cmd, udev->name, scsi_ret);
14511424
/*
14521425
* Ignore scsi_ret for now. target_complete_cmd
14531426
* drops it.
@@ -1462,13 +1435,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
14621435
* the queue
14631436
*/
14641437
list_splice_tail(&cmds, &udev->qfull_queue);
1465-
drained = false;
14661438
break;
14671439
}
14681440
}
14691441

14701442
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1471-
return drained;
14721443
}
14731444

14741445
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1652,6 +1623,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
16521623
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
16531624
all_expired = false;
16541625
}
1626+
if (!list_empty(&udev->qfull_queue))
1627+
all_expired = false;
16551628
idr_destroy(&udev->commands);
16561629
WARN_ON(!all_expired);
16571630

@@ -2037,9 +2010,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
20372010
mutex_lock(&udev->cmdr_lock);
20382011

20392012
idr_for_each_entry(&udev->commands, cmd, i) {
2040-
if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2041-
continue;
2042-
20432013
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
20442014
cmd->cmd_id, udev->name,
20452015
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2076,6 +2046,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
20762046

20772047
del_timer(&udev->cmd_timer);
20782048

2049+
run_qfull_queue(udev, false);
2050+
20792051
mutex_unlock(&udev->cmdr_lock);
20802052
}
20812053

@@ -2699,6 +2671,7 @@ static void find_free_blocks(void)
26992671
static void check_timedout_devices(void)
27002672
{
27012673
struct tcmu_dev *udev, *tmp_dev;
2674+
struct tcmu_cmd *cmd, *tmp_cmd;
27022675
LIST_HEAD(devs);
27032676

27042677
spin_lock_bh(&timed_out_udevs_lock);
@@ -2709,9 +2682,24 @@ static void check_timedout_devices(void)
27092682
spin_unlock_bh(&timed_out_udevs_lock);
27102683

27112684
mutex_lock(&udev->cmdr_lock);
2712-
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
27132685

2714-
tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2686+
/*
2687+
* If cmd_time_out is disabled but qfull is set deadline
2688+
* will only reflect the qfull timeout. Ignore it.
2689+
*/
2690+
if (udev->cmd_time_out) {
2691+
list_for_each_entry_safe(cmd, tmp_cmd,
2692+
&udev->inflight_queue,
2693+
queue_entry) {
2694+
tcmu_check_expired_ring_cmd(cmd);
2695+
}
2696+
tcmu_set_next_deadline(&udev->inflight_queue,
2697+
&udev->cmd_timer);
2698+
}
2699+
list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
2700+
queue_entry) {
2701+
tcmu_check_expired_queue_cmd(cmd);
2702+
}
27152703
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
27162704

27172705
mutex_unlock(&udev->cmdr_lock);

0 commit comments

Comments (0)