author     Jens Axboe <axboe@kernel.dk>  2022-12-21 07:11:33 -0700
committer  Jens Axboe <axboe@kernel.dk>  2022-12-21 13:31:40 -0700
commit     23fffb2f09ce1145cbd751801d45ba74acaa6542 (patch)
tree       e671424b224afc2f9a8500e87fdfd0f3cefe42c6 /io_uring/cancel.c
parent     52ea806ad983490b3132a9e526e11a10dc2fd10c (diff)
download   linux-23fffb2f09ce1145cbd751801d45ba74acaa6542.tar.gz
io_uring/cancel: re-grab ctx mutex after finishing wait
If we have a signal pending during cancelations, it'll cause the
task_work run to return an error. Since we didn't run task_work, the
current task is left in TASK_INTERRUPTIBLE state when we need to
re-grab the ctx mutex, and the kernel will rightfully complain about
that. Move the lock grabbing for the error cases outside the loop to
avoid that issue.

Reported-by: syzbot+7df055631cd1be4586fd@syzkaller.appspotmail.com
Link: https://lore.kernel.org/io-uring/0000000000003a14a905f05050b0@google.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
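
For background, the complaint referred to above is the might_sleep() debug
check that fires when a blocking primitive such as mutex_lock() is used while
the task state is not TASK_RUNNING. The following is a minimal, hypothetical
kernel-style sketch of that pattern, not the io_uring code itself; the names
wait_then_lock, my_wq and my_lock are made up for illustration:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/mutex.h>

/*
 * Sketch only: prepare_to_wait() moves the task to TASK_INTERRUPTIBLE.
 * If we bail out early (e.g. a signal is pending) the task never goes
 * through schedule(), so it is still in TASK_INTERRUPTIBLE; taking a
 * mutex at that point trips the "do not call blocking ops when
 * !TASK_RUNNING" warning.  finish_wait() resets the task to
 * TASK_RUNNING, after which blocking is fine again.  That reordering
 * is what the patch below does with ctx->uring_lock.
 */
static void wait_then_lock(struct wait_queue_head *my_wq, struct mutex *my_lock)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(my_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current))
		schedule();		/* actually sleep unless a signal is pending */

	/* mutex_lock(my_lock) here would warn on the signal_pending() path */
	finish_wait(my_wq, &wait);	/* back to TASK_RUNNING */
	mutex_lock(my_lock);		/* now safe to block */
	mutex_unlock(my_lock);
}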
Diffstat (limited to 'io_uring/cancel.c')
-rw-r--r--  io_uring/cancel.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 2291a53cdabd1..b4f5dfacc0c31 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
 		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+		mutex_unlock(&ctx->uring_lock);
 		if (ret != -EALREADY)
 			break;
-		mutex_unlock(&ctx->uring_lock);
 		ret = io_run_task_work_sig(ctx);
-		if (ret < 0) {
-			mutex_lock(&ctx->uring_lock);
+		if (ret < 0)
 			break;
-		}
 		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
-		mutex_lock(&ctx->uring_lock);
 		if (!ret) {
 			ret = -ETIME;
 			break;
 		}
+		mutex_lock(&ctx->uring_lock);
 	} while (1);
 	finish_wait(&ctx->cq_wait, &wait);
+	mutex_lock(&ctx->uring_lock);
 	if (ret == -ENOENT || ret > 0)
 		ret = 0;
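
For readability, this is roughly how the locking in io_sync_cancel()'s wait
loop reads with the patch applied. It is assembled from the hunk above with
explanatory comments added; the prepare_to_wait() at the top of the loop is
not visible in the hunk and is inferred from the commit message, so treat
this as a sketch rather than the verbatim file:

	do {
		/* prepare_to_wait() earlier in the loop body has left the
		 * task in TASK_INTERRUPTIBLE (inferred, not shown in hunk) */
		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;
		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;		/* e.g. signal pending: leave without locking */
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;		/* timed out: also leave without locking */
		}
		/* schedule_hrtimeout() returned with the task in TASK_RUNNING,
		 * so blocking on the mutex is fine for the next iteration */
		mutex_lock(&ctx->uring_lock);
	} while (1);

	/* every break path lands here: finish_wait() restores TASK_RUNNING
	 * first, and only then is the ctx mutex re-taken */
	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);
	if (ret == -ENOENT || ret > 0)
		ret = 0;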