@@ -72,9 +72,10 @@ struct ublk_io {
 #define UBLKSRV_NEED_FETCH_RQ		(1UL << 0)
 #define UBLKSRV_NEED_COMMIT_RQ_COMP	(1UL << 1)
 #define UBLKSRV_IO_FREE			(1UL << 2)
-	unsigned int flags;
+	unsigned short flags;
+	unsigned short refs;	/* used by target code only */

-	unsigned int result;
+	int result;
 };

 struct ublk_tgt_ops {
@@ -1062,14 +1063,47 @@ static inline void ublk_uring_prep_rw_zc(struct ublk_queue *q,
 	__u64 off = iod->start_sector << 9;
 	struct io_uring_sqe *lead;
 	struct io_uring_sqe *mem;
+	struct ublk_io *io = &q->ios[tag];
+	const unsigned front_len = 4096;

+	io->refs = 1;
+	io->result = 0;
 	q->io_inflight++;
+
+	/* test buffer split */
+	if (len > front_len)
+		len = front_len;
+
 	ublk_get_sqe_pair(&q->ring, &lead, &mem);

 	io_uring_prep_grp_lead(lead, dev_fd, tag, q_id);
 	io_uring_prep_rw_group(op, mem, fd, 0, len, off);
 	io_uring_sqe_set_flags(mem, IOSQE_FIXED_FILE | IOSQE_GROUP_KBUF);
 	mem->user_data = build_user_data(tag, ublk_op, 0, 1);
+
+	len = (iod->nr_sectors << 9) - len;
+	if (len > 0) {
+		struct io_uring_sqe *mem2 = io_uring_get_sqe(&q->ring);
+
+		/* don't split the buffer if we run out of sqes */
+		if (!mem2) {
+			len = iod->nr_sectors << 9;
+			io_uring_prep_rw_group(op, mem, fd, 0, len, off);
+			return;
+		}
+
+		/*
+		 * The 1st member consumes `front_len` bytes of the buffer,
+		 * and the 2nd member consumes the remaining bytes
+		 */
+		mem->flags |= IOSQE_GROUP_LINK;
+		io_uring_prep_rw_group(op, mem2, fd, front_len, len,
+				off + front_len);
+		io_uring_sqe_set_flags(mem2, IOSQE_FIXED_FILE | IOSQE_GROUP_KBUF);
+		mem2->user_data = build_user_data(tag, ublk_op, 0, 1);
+		q->io_inflight += 1;
+		io->refs += 1;
+	}
 }

 static inline void ublk_uring_prep_flush(struct ublk_queue *q,
@@ -1078,7 +1112,10 @@ static inline void ublk_uring_prep_flush(struct ublk_queue *q,
 {
 	unsigned ublk_op = ublksrv_get_op(iod);
 	struct io_uring_sqe *sqe;
+	struct ublk_io *io = &q->ios[tag];

+	io->refs = 1;
+	io->result = 0;
 	sqe = io_uring_get_sqe(&q->ring);
 	io_uring_prep_sync_file_range(sqe, fd,
 			iod->nr_sectors << 9,
@@ -1148,10 +1185,21 @@ static void ublk_loop_io_done(struct ublk_queue *q, int tag,
 		const struct io_uring_cqe *cqe)
 {
 	int cqe_tag = user_data_to_tag(cqe->user_data);
+	struct ublk_io *io = &q->ios[tag];

 	assert(tag == cqe_tag);
-	ublk_complete_io(q, tag, cqe->res);
 	q->io_inflight--;
+
+	if (cqe->res >= 0) {
+		if (io->result >= 0)
+			io->result += cqe->res;
+	} else {
+		if (io->result >= 0)
+			io->result = cqe->res;
+	}
+
+	if (--io->refs == 0)
+		ublk_complete_io(q, tag, io->result);
 }

 static void ublk_loop_tgt_deinit(struct ublk_dev *dev)
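For readers following the completion path above, here is a minimal standalone sketch (not part of the patch) of the refcounted completion accounting that ublk_loop_io_done() now performs: one reference is taken per SQE submitted for a tag, byte counts from successful CQEs are accumulated, the first error sticks, and the request is completed only when the last reference drops. The names demo_io and demo_cqe_done are hypothetical; only the arithmetic mirrors the diff.

/* standalone sketch of the refs/result accounting used above */
#include <assert.h>
#include <stdio.h>

struct demo_io {
	unsigned short refs;	/* one reference per submitted SQE */
	int result;		/* accumulated bytes, or first error */
};

/* mirrors the per-CQE handling in ublk_loop_io_done(); returns 1 when done */
static int demo_cqe_done(struct demo_io *io, int res)
{
	if (res >= 0) {
		if (io->result >= 0)
			io->result += res;	/* accumulate transferred bytes */
	} else if (io->result >= 0) {
		io->result = res;		/* keep the first error seen */
	}

	/* complete only when the last grouped SQE has finished */
	return --io->refs == 0;
}

int main(void)
{
	/* an 8192-byte request split into a 4096-byte head and tail */
	struct demo_io io = { .refs = 2, .result = 0 };
	int done;

	done = demo_cqe_done(&io, 4096);	/* first CQE: not done yet */
	assert(!done);
	done = demo_cqe_done(&io, 4096);	/* second CQE: completes the request */
	assert(done);
	printf("completed with result %d\n", io.result);	/* prints 8192 */
	return 0;
}

Under this scheme the out-of-sqe fallback in ublk_uring_prep_rw_zc() simply leaves refs at 1, so unsplit requests still complete on their single CQE exactly as before.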