author	Pavel Begunkov <asml.silence@gmail.com>	2020-10-13 09:43:58 +0100
committer	Jens Axboe <axboe@kernel.dk>	2020-10-17 09:25:42 -0600
commit	6a0af224c21309f24dbb1b79d0744b255d7156a0
tree	ab03a941f52e2531908879bb2db92c789f6f5696
parent	b1b74cfc1967bd0747ff85f650f598e84eeb3d1c
io_uring: don't put a poll req under spinlock
Move io_put_req() in io_poll_task_handler() out from under the spinlock. This
eliminates the need for REQ_F_COMP_LOCKED, at the expense of potentially
having to grab the lock again. That's still a better trade-off than relying
on the locked flag.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 fs/io_uring.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ca9be31b76b3cd..92546f90defdf0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4844,10 +4844,9 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 	hash_del(&req->hash_node);
 	io_poll_complete(req, req->result, 0);
-	req->flags |= REQ_F_COMP_LOCKED;
-	*nxt = io_put_req_find_next(req);
 	spin_unlock_irq(&ctx->completion_lock);
+	*nxt = io_put_req_find_next(req);
 	io_cqring_ev_posted(ctx);
 }
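
For readers unfamiliar with the locking rule involved, the sketch below (plain user-space C with pthreads and C11 atomics, not kernel code) illustrates the pattern the patch switches to: complete the request while the completion lock is held, drop the lock, and only then drop the reference, so a final put that may itself want the lock never runs with the lock already held. All names here (demo_req, demo_put, demo_free, demo_complete) are hypothetical stand-ins for the io_uring counterparts.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-in for a request with a reference count. */
struct demo_req {
	atomic_int refs;
	int result;
};

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Final teardown may need completion_lock again (the situation that
 * REQ_F_COMP_LOCKED used to signal), so it must never be entered while
 * the lock is already held.
 */
static void demo_free(struct demo_req *req)
{
	pthread_mutex_lock(&completion_lock);
	/* e.g. unlink the request from completion bookkeeping */
	pthread_mutex_unlock(&completion_lock);
	free(req);
}

/* Drop a reference; free on the last put. */
static void demo_put(struct demo_req *req)
{
	if (atomic_fetch_sub(&req->refs, 1) == 1)
		demo_free(req);
}

/*
 * The pattern the patch adopts: complete under the lock, unlock,
 * then put the reference with the lock released.
 */
static void demo_complete(struct demo_req *req, int result)
{
	pthread_mutex_lock(&completion_lock);
	req->result = result;		/* completion done while locked */
	pthread_mutex_unlock(&completion_lock);

	demo_put(req);			/* may re-take the lock in demo_free() */
}

The cost is that the final put may have to take completion_lock a second time, but the put path no longer needs a flag telling it whether the lock is already held, which is exactly the trade-off described in the commit message.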