Commit 020b40f
io_uring: make ctx->timeout_lock a raw spinlock
Chase reports that their tester complains about a locking context
mismatch:

=============================
[ BUG: Invalid wait context ]
6.13.0-rc1-gf137f14b7ccb-dirty #9 Not tainted
-----------------------------
syz.1.25198/182604 is trying to lock:
ffff88805e66a358 (&ctx->timeout_lock){-.-.}-{3:3}, at: spin_lock_irq include/linux/spinlock.h:376 [inline]
ffff88805e66a358 (&ctx->timeout_lock){-.-.}-{3:3}, at: io_match_task_safe io_uring/io_uring.c:218 [inline]
ffff88805e66a358 (&ctx->timeout_lock){-.-.}-{3:3}, at: io_match_task_safe+0x187/0x250 io_uring/io_uring.c:204
other info that might help us debug this:
context-{5:5}
1 lock held by syz.1.25198/182604:
 #0: ffff88802b7d48c0 (&acct->lock){+.+.}-{2:2}, at: io_acct_cancel_pending_work+0x2d/0x6b0 io_uring/io-wq.c:1049
stack backtrace:
CPU: 0 UID: 0 PID: 182604 Comm: syz.1.25198 Not tainted 6.13.0-rc1-gf137f14b7ccb-dirty #9
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:94 [inline]
 dump_stack_lvl+0x82/0xd0 lib/dump_stack.c:120
 print_lock_invalid_wait_context kernel/locking/lockdep.c:4826 [inline]
 check_wait_context kernel/locking/lockdep.c:4898 [inline]
 __lock_acquire+0x883/0x3c80 kernel/locking/lockdep.c:5176
 lock_acquire.part.0+0x11b/0x370 kernel/locking/lockdep.c:5849
 __raw_spin_lock_irq include/linux/spinlock_api_smp.h:119 [inline]
 _raw_spin_lock_irq+0x36/0x50 kernel/locking/spinlock.c:170
 spin_lock_irq include/linux/spinlock.h:376 [inline]
 io_match_task_safe io_uring/io_uring.c:218 [inline]
 io_match_task_safe+0x187/0x250 io_uring/io_uring.c:204
 io_acct_cancel_pending_work+0xb8/0x6b0 io_uring/io-wq.c:1052
 io_wq_cancel_pending_work io_uring/io-wq.c:1074 [inline]
 io_wq_cancel_cb+0xb0/0x390 io_uring/io-wq.c:1112
 io_uring_try_cancel_requests+0x15e/0xd70 io_uring/io_uring.c:3062
 io_uring_cancel_generic+0x6ec/0x8c0 io_uring/io_uring.c:3140
 io_uring_files_cancel include/linux/io_uring.h:20 [inline]
 do_exit+0x494/0x27a0 kernel/exit.c:894
 do_group_exit+0xb3/0x250 kernel/exit.c:1087
 get_signal+0x1d77/0x1ef0 kernel/signal.c:3017
 arch_do_signal_or_restart+0x79/0x5b0 arch/x86/kernel/signal.c:337
 exit_to_user_mode_loop kernel/entry/common.c:111 [inline]
 exit_to_user_mode_prepare include/linux/entry-common.h:329 [inline]
 __syscall_exit_to_user_mode_work kernel/entry/common.c:207 [inline]
 syscall_exit_to_user_mode+0x150/0x2a0 kernel/entry/common.c:218
 do_syscall_64+0xd8/0x250 arch/x86/entry/common.c:89
 entry_SYSCALL_64_after_hwframe+0x77/0x7f

This happens because io_uring nests ctx->timeout_lock inside the io-wq
acct lock. The latter is used from inside the scheduler and hence is a
raw spinlock, while the former is a "normal" spinlock and can hence
sleep on PREEMPT_RT.

Change ctx->timeout_lock to be a raw spinlock to solve this nesting
dependency on PREEMPT_RT=y.

Reported-by: chase xd <sl1589472800@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
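For readers unfamiliar with the RT locking rules: on PREEMPT_RT a
spinlock_t is backed by an rtmutex and may sleep, while a raw_spinlock_t
always busy-waits with preemption disabled, so a sleeping lock must not
be taken inside a raw one. A minimal, self-contained sketch of the
pattern lockdep rejects (not from the patch; the lock and function names
here are made up):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(outer_lock); /* like io-wq's acct->lock */
    static DEFINE_SPINLOCK(inner_lock);     /* like the old ctx->timeout_lock */

    static void invalid_wait_context(void)  /* hypothetical function */
    {
            raw_spin_lock(&outer_lock);
            /*
             * BUG on PREEMPT_RT: spin_lock() may sleep here, but the raw
             * lock above keeps preemption disabled. Lockdep reports this
             * as "Invalid wait context" (and does so even on non-RT
             * builds with CONFIG_PROVE_RAW_LOCK_NESTING enabled).
             */
            spin_lock(&inner_lock);
            spin_unlock(&inner_lock);
            raw_spin_unlock(&outer_lock);
    }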
1 parent 99d6af6 commit 020b40f

3 files changed: +26 -26 lines changed

include/linux/io_uring_types.h

+1 -1

@@ -345,7 +345,7 @@ struct io_ring_ctx {
 
 	/* timeouts */
 	struct {
-		spinlock_t		timeout_lock;
+		raw_spinlock_t		timeout_lock;
 		struct list_head	timeout_list;
 		struct list_head	ltimeout_list;
 		unsigned		cq_last_tm_flush;
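The type change drives everything else in the patch: raw_spinlock_t has
its own init/lock/unlock API, so every site touching timeout_lock must
switch in lockstep, as the two files below do. A standalone sketch of
the paired calls (hypothetical struct and function names):

    #include <linux/spinlock.h>

    struct example_ctx {
            raw_spinlock_t lock;            /* was: spinlock_t */
    };

    static void example_use(struct example_ctx *ctx)
    {
            raw_spin_lock_init(&ctx->lock); /* was: spin_lock_init() */

            raw_spin_lock_irq(&ctx->lock);  /* was: spin_lock_irq() */
            /* critical section: must never sleep, even on PREEMPT_RT */
            raw_spin_unlock_irq(&ctx->lock);
    }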

io_uring/io_uring.c

+5 -5

@@ -215,9 +215,9 @@ bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
 		struct io_ring_ctx *ctx = head->ctx;
 
 		/* protect against races with linked timeouts */
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		matched = io_match_linked(head);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 	} else {
 		matched = io_match_linked(head);
 	}
@@ -333,7 +333,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	init_waitqueue_head(&ctx->cq_wait);
 	init_waitqueue_head(&ctx->poll_wq);
 	spin_lock_init(&ctx->completion_lock);
-	spin_lock_init(&ctx->timeout_lock);
+	raw_spin_lock_init(&ctx->timeout_lock);
 	INIT_WQ_LIST(&ctx->iopoll_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_comp);
 	INIT_LIST_HEAD(&ctx->defer_list);
@@ -498,10 +498,10 @@ static void io_prep_async_link(struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 	} else {
 		io_for_each_link(cur, req)
 			io_prep_async_work(cur);

io_uring/timeout.c

+20 -20

@@ -74,10 +74,10 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	if (!io_timeout_finish(timeout, data)) {
 		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
 			/* re-arm timer */
-			spin_lock_irq(&ctx->timeout_lock);
+			raw_spin_lock_irq(&ctx->timeout_lock);
 			list_add(&timeout->list, ctx->timeout_list.prev);
 			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-			spin_unlock_irq(&ctx->timeout_lock);
+			raw_spin_unlock_irq(&ctx->timeout_lock);
 			return;
 		}
 	}
@@ -109,7 +109,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 	u32 seq;
 	struct io_timeout *timeout, *tmp;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
@@ -134,7 +134,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 		io_kill_timeout(req, 0);
 	}
 	ctx->cq_last_tm_flush = seq;
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 }
 
 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -200,9 +200,9 @@ void io_disarm_next(struct io_kiocb *req)
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		link = io_disarm_linked_timeout(req);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
 			io_req_queue_tw_complete(link, -ECANCELED);
 	}
@@ -238,11 +238,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->timeout_lock, flags);
+	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
 	list_del_init(&timeout->list);
 	atomic_set(&req->ctx->cq_timeouts,
 		atomic_read(&req->ctx->cq_timeouts) + 1);
-	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
 	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
 		req_set_fail(req);
@@ -285,9 +285,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
 	struct io_kiocb *req;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	req = io_timeout_extract(ctx, cd);
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -330,7 +330,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ctx->timeout_lock, flags);
+	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
 	prev = timeout->head;
 	timeout->head = NULL;
 
@@ -345,7 +345,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	}
 	list_del(&timeout->list);
 	timeout->prev = prev;
-	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
 	req->io_task_work.func = io_req_task_link_timeout;
 	io_req_task_work_add(req);
@@ -472,12 +472,12 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	} else {
 		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
 
-		spin_lock_irq(&ctx->timeout_lock);
+		raw_spin_lock_irq(&ctx->timeout_lock);
 		if (tr->ltimeout)
 			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
 		else
 			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
-		spin_unlock_irq(&ctx->timeout_lock);
+		raw_spin_unlock_irq(&ctx->timeout_lock);
 	}
 
 	if (ret < 0)
@@ -572,7 +572,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	struct list_head *entry;
 	u32 tail, off = timeout->off;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 
 	/*
 	 * sqe->off holds how many events that need to occur for this
@@ -611,7 +611,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	list_add(&timeout->list, entry);
 	data->timer.function = io_timeout_fn;
 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
 
@@ -620,7 +620,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_ring_ctx *ctx = req->ctx;
 
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	/*
 	 * If the back reference is NULL, then our linked request finished
 	 * before we got a chance to setup the timer
@@ -633,7 +633,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
 				data->mode);
 		list_add_tail(&timeout->list, &ctx->ltimeout_list);
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	/* drop submission reference */
 	io_put_req(req);
 }
@@ -668,15 +668,15 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 	 * timeout_lock first to keep locking ordering.
 	 */
 	spin_lock(&ctx->completion_lock);
-	spin_lock_irq(&ctx->timeout_lock);
+	raw_spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
 		if (io_match_task(req, tctx, cancel_all) &&
 		    io_kill_timeout(req, -ECANCELED))
 			canceled++;
 	}
-	spin_unlock_irq(&ctx->timeout_lock);
+	raw_spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
 	return canceled != 0;
 }
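A side note on the variants used above: the two hrtimer callbacks
(io_timeout_fn, io_link_timeout_fn) take the lock with
raw_spin_lock_irqsave() rather than raw_spin_lock_irq(), because they
can be entered with interrupts already disabled and must restore the
caller's IRQ state instead of unconditionally re-enabling it; the
process-context paths can use the plain _irq variants. A minimal sketch
of that callback pattern (hypothetical names, not from the patch):

    #include <linux/hrtimer.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);

    static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
    {
            unsigned long flags;

            /* Preserve the prior IRQ state; safe even if IRQs are off. */
            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... unlink the expired entry, update counters ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);

            return HRTIMER_NORESTART;   /* one-shot, like io_timeout_fn */
    }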
