Skip to content

Commit 994f96e

Browse files
committed
test/uring_cmd_ublk.c: cover device buffer split
Force splitting ublk-loop's device buffer into two parts: 1) the front part, fixed at 4096 bytes; 2) the remaining bytes. Add two member IO requests for submitting IOs via the two buffers. Then we can verify that the buffer split works as expected. Signed-off-by: Ming Lei <tom.leiming@gmail.com>
1 parent 42a230e commit 994f96e

File tree

1 file changed

+51
-3
lines changed

1 file changed

+51
-3
lines changed

test/uring_cmd_ublk.c

+51-3
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,10 @@ struct ublk_io {
7272
#define UBLKSRV_NEED_FETCH_RQ (1UL << 0)
7373
#define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1)
7474
#define UBLKSRV_IO_FREE (1UL << 2)
75-
unsigned int flags;
75+
unsigned short flags;
76+
unsigned short refs; /* used by target code only */
7677

77-
unsigned int result;
78+
int result;
7879
};
7980

8081
struct ublk_tgt_ops {
@@ -1062,14 +1063,47 @@ static inline void ublk_uring_prep_rw_zc(struct ublk_queue *q,
10621063
__u64 off = iod->start_sector << 9;
10631064
struct io_uring_sqe *lead;
10641065
struct io_uring_sqe *mem;
1066+
struct ublk_io *io = &q->ios[tag];
1067+
const unsigned front_len = 4096;
10651068

1069+
io->refs = 1;
1070+
io->result = 0;
10661071
q->io_inflight++;
1072+
1073+
/* test buffer split */
1074+
if (len > front_len)
1075+
len = front_len;
1076+
10671077
ublk_get_sqe_pair(&q->ring, &lead, &mem);
10681078

10691079
io_uring_prep_grp_lead(lead, dev_fd, tag, q_id);
10701080
io_uring_prep_rw_group(op, mem, fd, 0, len, off);
10711081
io_uring_sqe_set_flags(mem, IOSQE_FIXED_FILE | IOSQE_GROUP_KBUF);
10721082
mem->user_data = build_user_data(tag, ublk_op, 0, 1);
1083+
1084+
len = (iod->nr_sectors << 9) - len;
1085+
if (len > 0) {
1086+
struct io_uring_sqe *mem2 = io_uring_get_sqe(&q->ring);
1087+
1088+
/* don't split the buffer if we run out of SQEs */
1089+
if (!mem2) {
1090+
len = iod->nr_sectors << 9;
1091+
io_uring_prep_rw_group(op, mem, fd, 0, len, off);
1092+
return;
1093+
}
1094+
1095+
/*
1096+
 * The 1st member consumes a buffer of `front_len` bytes,
1097+
 * and the 2nd member consumes the remaining bytes
1098+
*/
1099+
mem->flags |= IOSQE_GROUP_LINK;
1100+
io_uring_prep_rw_group(op, mem2, fd, front_len, len,
1101+
off + front_len);
1102+
io_uring_sqe_set_flags(mem2, IOSQE_FIXED_FILE | IOSQE_GROUP_KBUF);
1103+
mem2->user_data = build_user_data(tag, ublk_op, 0, 1);
1104+
q->io_inflight += 1;
1105+
io->refs += 1;
1106+
}
10731107
}
10741108

10751109
static inline void ublk_uring_prep_flush(struct ublk_queue *q,
@@ -1078,7 +1112,10 @@ static inline void ublk_uring_prep_flush(struct ublk_queue *q,
10781112
{
10791113
unsigned ublk_op = ublksrv_get_op(iod);
10801114
struct io_uring_sqe *sqe;
1115+
struct ublk_io *io = &q->ios[tag];
10811116

1117+
io->refs = 1;
1118+
io->result = 0;
10821119
sqe = io_uring_get_sqe(&q->ring);
10831120
io_uring_prep_sync_file_range(sqe, fd,
10841121
iod->nr_sectors << 9,
@@ -1148,10 +1185,21 @@ static void ublk_loop_io_done(struct ublk_queue *q, int tag,
11481185
const struct io_uring_cqe *cqe)
11491186
{
11501187
int cqe_tag = user_data_to_tag(cqe->user_data);
1188+
struct ublk_io *io = &q->ios[tag];
11511189

11521190
assert(tag == cqe_tag);
1153-
ublk_complete_io(q, tag, cqe->res);
11541191
q->io_inflight--;
1192+
1193+
if (cqe->res >= 0) {
1194+
if (io->result >= 0)
1195+
io->result += cqe->res;
1196+
} else {
1197+
if (io->result >= 0)
1198+
io->result = cqe->res;
1199+
}
1200+
1201+
if (--io->refs == 0)
1202+
ublk_complete_io(q, tag, io->result);
11551203
}
11561204

11571205
static void ublk_loop_tgt_deinit(struct ublk_dev *dev)

0 commit comments

Comments
 (0)