Commit 521223d

io_uring/cancel: don't default to setting req->work.cancel_seq
Just leave it unset by default, avoiding dipping into the last cacheline
(which is otherwise untouched) for the fast path of using poll to drive
networked traffic. Add a flag that tells us if the sequence is valid or
not, and then we can defer actually assigning the flag and sequence until
someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: 4bcb982
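As a rough illustration of the technique described above, here is a minimal userspace C sketch (hypothetical struct and helper names; the real fields live in io_kiocb/io_wq_work, shown in the diffs below). The cold sequence field is written only once a cancelation pass actually visits the request, gated by a validity bit kept in the hot flags word:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for io_kiocb: 'flags' models the hot flags word,
 * 'cancel_seq' models the cold field in the request's last cacheline. */
struct req_model {
	uint64_t flags;
	int cancel_seq;
};

#define MODEL_F_CANCEL_SEQ	(1ULL << 0)	/* models REQ_F_CANCEL_SEQ */

/* Mirrors the shape of the new io_cancel_match_sequence() helper: returns
 * true only if this request was already tagged with @sequence; otherwise
 * tags it and returns false. The cold field is written only here, never
 * on the fast path that arms a poll request. */
static bool model_match_sequence(struct req_model *req, int sequence)
{
	if ((req->flags & MODEL_F_CANCEL_SEQ) && sequence == req->cancel_seq)
		return true;

	req->flags |= MODEL_F_CANCEL_SEQ;
	req->cancel_seq = sequence;
	return false;
}

The payoff, per the message above, is that arming a poll request no longer reads ctx->cancel_seq or dips into the request's otherwise untouched last cacheline.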

File tree

5 files changed, 15 insertions(+), 8 deletions(-)

include/linux/io_uring_types.h

Lines changed: 3 additions & 0 deletions
@@ -463,6 +463,7 @@ enum {
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
 	REQ_F_POLL_NO_LAZY_BIT,
+	REQ_F_CANCEL_SEQ_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -535,6 +536,8 @@ enum {
 	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
 	/* don't use lazy poll wake for this request */
 	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+	/* cancel sequence is set and valid */
+	REQ_F_CANCEL_SEQ	= IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
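For reference, IO_REQ_FLAG() appears to come from the parent commit 4bcb982, which widened req->flags to 64 bits; assuming its BIT_ULL()-style expansion, the new flag is just the single bit at REQ_F_CANCEL_SEQ_BIT:

/* Sketch, assuming IO_REQ_FLAG(bitno) expands to a 64-bit shift
 * (BIT_ULL()-style); illustrative, not the kernel's exact definition. */
#define IO_REQ_FLAG_SKETCH(bitno)	(1ULL << (bitno))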

io_uring/cancel.c

Lines changed: 1 addition & 2 deletions
@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
 		return false;
 	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-		if (cd->seq == req->work.cancel_seq)
+		if (io_cancel_match_sequence(req, cd->seq))
 			return false;
-		req->work.cancel_seq = cd->seq;
 	}
 
 	return true;

io_uring/cancel.h

Lines changed: 10 additions & 0 deletions
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
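Note the inverted return value: false means "first visit in this cancelation pass, go ahead and cancel", true means "already tagged with this sequence, skip it". A hypothetical driver against the model_match_sequence() sketch shown after the commit message:

#include <assert.h>

int main(void)
{
	struct req_model r = { 0 };

	assert(!model_match_sequence(&r, 42));	/* first visit: cancel it */
	assert(model_match_sequence(&r, 42));	/* same pass: skip it */
	assert(!model_match_sequence(&r, 43));	/* later pass, new sequence */
	return 0;
}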

io_uring/io_uring.c

Lines changed: 0 additions & 1 deletion
@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 
 	req->work.list.next = NULL;
 	req->work.flags = 0;
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 

io_uring/poll.c

Lines changed: 1 addition & 5 deletions
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;
