author		Pavel Begunkov <asml.silence@gmail.com>		2023-12-03 15:37:53 +0000
committer	Jens Axboe <axboe@kernel.dk>			2023-12-03 19:09:28 -0700
commit		f7b32e785042d2357c5abc23ca6db1b92c91a070 (patch)
tree		0a06a82fbfd4f58f40b49ba689d1c5aa43ca4aa3 /io_uring
parent		73363c262d6a7d26063da96610f61baf69a70f7c (diff)
download	linux-f7b32e785042d2357c5abc23ca6db1b92c91a070.tar.gz
io_uring: fix mutex_unlock with unreferenced ctx
Callers of mutex_unlock() have to make sure that the mutex stays alive
for the whole duration of the function call. For io_uring that means
that the following pattern is not valid unless we ensure that the
context outlives the mutex_unlock() call.

	mutex_lock(&ctx->uring_lock);
	req_put(req); // typically via io_req_task_submit()
	mutex_unlock(&ctx->uring_lock);

Most contexts are fine: io-wq pins requests, syscalls hold the file,
task works are taking ctx references and so on. However, the task work
fallback path doesn't follow the rule.

Cc: <stable@vger.kernel.org>
Fixes: 04fc6c802d ("io_uring: save ctx put/get for task_work submit")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/io-uring/CAG48ez3xSoYb+45f1RLtktROJrpiDQ1otNvdR+YLQf7m+Krj5Q@mail.gmail.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
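To make the lifetime rule concrete, here is a minimal userspace sketch of the same hazard (not kernel code; struct, field, and function names are illustrative) and of the fix pattern the patch applies with percpu_ref_get()/percpu_ref_put() on ctx->refs: pin the object that embeds the mutex before taking the lock, and drop the pin only after mutex_unlock() returns.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative analogue of io_ring_ctx: a refcounted object that
 * embeds the mutex protecting it. */
struct obj {
	atomic_int refs;
	pthread_mutex_t lock;
};

static void obj_put(struct obj *o)
{
	/* Dropping the last reference destroys the mutex and frees o. */
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

/* Buggy shape: if work() drops the final reference, the unlock below
 * operates on freed memory. */
static void run_buggy(struct obj *o, void (*work)(struct obj *))
{
	pthread_mutex_lock(&o->lock);
	work(o);			/* may call obj_put() for the last ref */
	pthread_mutex_unlock(&o->lock);	/* potential use-after-free */
}

/* Fixed shape, mirroring the patch: take an extra reference before
 * locking so the object (and its mutex) outlives mutex_unlock(). */
static void run_fixed(struct obj *o, void (*work)(struct obj *))
{
	atomic_fetch_add(&o->refs, 1);
	pthread_mutex_lock(&o->lock);
	work(o);
	pthread_mutex_unlock(&o->lock);
	obj_put(o);
}

The fallback path in io_fallback_req_func() follows the fixed shape after this patch: percpu_ref_get(&ctx->refs) before mutex_lock() and percpu_ref_put(&ctx->refs) after mutex_unlock().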
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index aba5657d287ef6..9626a363f12136 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -271,6 +271,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	struct io_kiocb *req, *tmp;
 	struct io_tw_state ts = { .locked = true, };
 
+	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &ts);
@@ -278,6 +279,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 		return;
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
@@ -3146,12 +3148,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 	init_completion(&exit.completion);
 	init_task_work(&exit.task_work, io_tctx_exit_cb);
 	exit.ctx = ctx;
-	/*
-	 * Some may use context even when all refs and requests have been put,
-	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see io_req_task_submit(). Apart from other work,
-	 * this lock/unlock section also waits them to finish.
-	 */
+
 	mutex_lock(&ctx->uring_lock);
 	while (!list_empty(&ctx->tctx_list)) {
 		WARN_ON_ONCE(time_after(jiffies, timeout));