Commit 0a562db

BStroesser authored and gregkh committed
scsi: target: tcmu: Userspace must not complete queued commands
[ Upstream commit 61fb248 ]

When tcmu queues a new command - no matter whether in the command ring or
in the qfull_queue - a cmd_id from the IDR udev->commands is assigned to
the command. If userspace sends a wrong command completion containing the
cmd_id of a command on the qfull_queue, tcmu_handle_completions() finds
the command in the IDR and calls tcmu_handle_completion() for it. This
might do some nasty things because commands in the qfull_queue do not
have a valid dbi list.

To fix this bug, we no longer add queued commands to the IDR. Instead,
the cmd_id is assigned when a command is written to the command ring. Due
to this change I had to adapt the source code at several places where up
to now an idr_for_each had been done.

[mkp: fix checkpatch warnings]

Link: https://lore.kernel.org/r/20200518164833.12775-1-bstroesser@ts.fujitsu.com
Acked-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Bodo Stroesser <bstroesser@ts.fujitsu.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
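
For context, the completion path that made this exploitable looks roughly
like the sketch below. It is a condensed model of the lookup done by
tcmu_handle_completions(); the wrapper name complete_one_entry() and the
trimmed error handling are illustrative only, not the driver's exact code:

	/*
	 * Condensed sketch, not the exact driver code. entry->hdr.cmd_id
	 * is read back from the command ring, i.e. it is fully under
	 * userspace control.
	 */
	static void complete_one_entry(struct tcmu_dev *udev,
				       struct tcmu_cmd_entry *entry)
	{
		struct tcmu_cmd *cmd;

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			return;
		}

		/*
		 * Before this patch, a command still waiting on the
		 * qfull_queue was already registered in the IDR, so a
		 * forged cmd_id could land here and the completion code
		 * would walk a dbi list that was never set up. After the
		 * patch, idr_alloc() runs only in queue_cmd_ring(), so
		 * this lookup can only return commands that actually
		 * reached the ring.
		 */
		tcmu_handle_completion(cmd, entry);
	}

Deferring idr_alloc() to ring insertion makes the IDR an exact map of the
commands visible to userspace, which is also why run_qfull_queue() and
the timeout/release paths below no longer need idr_remove() for commands
that never left the qfull_queue.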
1 parent 34d68f5 commit 0a562db

File tree

1 file changed (+71, -83 lines)

drivers/target/target_core_user.c

Lines changed: 71 additions & 83 deletions
@@ -893,41 +893,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
 	return command_size;
 }
 
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
-				struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+				 struct timer_list *timer)
 {
-	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
-	int cmd_id;
-
-	if (tcmu_cmd->cmd_id)
-		goto setup_timer;
-
-	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
-	if (cmd_id < 0) {
-		pr_err("tcmu: Could not allocate cmd id.\n");
-		return cmd_id;
-	}
-	tcmu_cmd->cmd_id = cmd_id;
-
-	pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
-		 udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
 	if (!tmo)
-		return 0;
+		return;
 
 	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
 	if (!timer_pending(timer))
 		mod_timer(timer, tcmu_cmd->deadline);
 
-	return 0;
+	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
 }
 
 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
 {
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 	unsigned int tmo;
-	int ret;
 
 	/*
 	 * For backwards compat if qfull_time_out is not set use
@@ -942,13 +925,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
 	else
 		tmo = TCMU_TIME_OUT;
 
-	ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
-	if (ret)
-		return ret;
+	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
 
 	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
-	pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
-		 tcmu_cmd->cmd_id, udev->name);
+	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+		 tcmu_cmd, udev->name);
 	return 0;
 }
 
@@ -970,7 +951,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 	struct tcmu_mailbox *mb;
 	struct tcmu_cmd_entry *entry;
 	struct iovec *iov;
-	int iov_cnt, ret;
+	int iov_cnt, cmd_id;
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
@@ -1071,14 +1052,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 	}
 	entry->req.iov_bidi_cnt = iov_cnt;
 
-	ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
-				   &udev->cmd_timer);
-	if (ret) {
-		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+	if (cmd_id < 0) {
+		pr_err("tcmu: Could not allocate cmd id.\n");
 
+		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
 		*scsi_err = TCM_OUT_OF_RESOURCES;
 		return -1;
 	}
+	tcmu_cmd->cmd_id = cmd_id;
+
+	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+		 tcmu_cmd, udev->name);
+
+	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
 
 	/*
@@ -1290,50 +1278,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 	return handled;
 }
 
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
 {
-	struct tcmu_cmd *cmd = p;
-	struct tcmu_dev *udev = cmd->tcmu_dev;
-	u8 scsi_status;
 	struct se_cmd *se_cmd;
-	bool is_running;
-
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
-		return 0;
 
 	if (!time_after(jiffies, cmd->deadline))
-		return 0;
+		return;
 
-	is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+	list_del_init(&cmd->queue_entry);
 	se_cmd = cmd->se_cmd;
+	cmd->se_cmd = NULL;
 
-	if (is_running) {
-		/*
-		 * If cmd_time_out is disabled but qfull is set deadline
-		 * will only reflect the qfull timeout. Ignore it.
-		 */
-		if (!udev->cmd_time_out)
-			return 0;
+	pr_debug("Timing out inflight cmd %u on dev %s.\n",
+		 cmd->cmd_id, cmd->tcmu_dev->name);
 
-		set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
-		/*
-		 * target_complete_cmd will translate this to LUN COMM FAILURE
-		 */
-		scsi_status = SAM_STAT_CHECK_CONDITION;
-		list_del_init(&cmd->queue_entry);
-		cmd->se_cmd = NULL;
-	} else {
-		list_del_init(&cmd->queue_entry);
-		idr_remove(&udev->commands, id);
-		tcmu_free_cmd(cmd);
-		scsi_status = SAM_STAT_TASK_SET_FULL;
-	}
+	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
 
-	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
-		 id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+	struct se_cmd *se_cmd;
 
-	target_complete_cmd(se_cmd, scsi_status);
-	return 0;
+	if (!time_after(jiffies, cmd->deadline))
+		return;
+
+	list_del_init(&cmd->queue_entry);
+	se_cmd = cmd->se_cmd;
+	tcmu_free_cmd(cmd);
+
+	pr_debug("Timing out queued cmd %p on dev %s.\n",
+		 cmd, cmd->tcmu_dev->name);
+
+	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
 
 static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1418,16 +1395,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	return &udev->se_dev;
 }
 
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
 {
 	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
 	LIST_HEAD(cmds);
-	bool drained = true;
 	sense_reason_t scsi_ret;
 	int ret;
 
 	if (list_empty(&udev->qfull_queue))
-		return true;
+		return;
 
 	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
 
@@ -1436,11 +1412,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
 	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
 		list_del_init(&tcmu_cmd->queue_entry);
 
-		pr_debug("removing cmd %u on dev %s from queue\n",
-			 tcmu_cmd->cmd_id, udev->name);
+		pr_debug("removing cmd %p on dev %s from queue\n",
+			 tcmu_cmd, udev->name);
 
 		if (fail) {
-			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
 			/*
 			 * We were not able to even start the command, so
 			 * fail with busy to allow a retry in case runner
@@ -1455,10 +1430,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
 
 		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
 		if (ret < 0) {
-			pr_debug("cmd %u on dev %s failed with %u\n",
-				 tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
-			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+			pr_debug("cmd %p on dev %s failed with %u\n",
+				 tcmu_cmd, udev->name, scsi_ret);
 			/*
 			 * Ignore scsi_ret for now. target_complete_cmd
 			 * drops it.
@@ -1473,13 +1446,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
 			 * the queue
 			 */
 			list_splice_tail(&cmds, &udev->qfull_queue);
-			drained = false;
 			break;
 		}
 	}
 
 	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
-	return drained;
 }
 
 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1663,6 +1634,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
 		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
 			all_expired = false;
 	}
+	if (!list_empty(&udev->qfull_queue))
+		all_expired = false;
 	idr_destroy(&udev->commands);
 	WARN_ON(!all_expired);
 
@@ -2031,9 +2004,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 	mutex_lock(&udev->cmdr_lock);
 
 	idr_for_each_entry(&udev->commands, cmd, i) {
-		if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
-			continue;
-
 		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
 			 cmd->cmd_id, udev->name,
 			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2071,6 +2041,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 
 	del_timer(&udev->cmd_timer);
 
+	run_qfull_queue(udev, false);
+
 	mutex_unlock(&udev->cmdr_lock);
 }
 
@@ -2692,6 +2664,7 @@ static void find_free_blocks(void)
 static void check_timedout_devices(void)
 {
 	struct tcmu_dev *udev, *tmp_dev;
+	struct tcmu_cmd *cmd, *tmp_cmd;
 	LIST_HEAD(devs);
 
 	spin_lock_bh(&timed_out_udevs_lock);
@@ -2702,9 +2675,24 @@ static void check_timedout_devices(void)
 		spin_unlock_bh(&timed_out_udevs_lock);
 
 		mutex_lock(&udev->cmdr_lock);
-		idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
 
-		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+		/*
+		 * If cmd_time_out is disabled but qfull is set deadline
+		 * will only reflect the qfull timeout. Ignore it.
+		 */
+		if (udev->cmd_time_out) {
+			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry) {
				tcmu_check_expired_ring_cmd(cmd);
			}
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
+		}
+		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+					 queue_entry) {
+			tcmu_check_expired_queue_cmd(cmd);
+		}
 		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
 
 		mutex_unlock(&udev->cmdr_lock);
