author    Pavel Begunkov <asml.silence@gmail.com>    2022-10-06 02:06:10 +0100
committer Jens Axboe <axboe@kernel.dk>    2022-10-12 16:30:56 -0600
commit    fc86f9d3bb4904117eea70347d323fde34a47c79 (patch)
tree      5c4b611b28fb3ad7f5412edcd3b057540768d1c5 /io_uring
parent    3fb1bd68817288729179444caf1fd5c5c4d2d65d (diff)
io_uring: remove redundant memory barrier in io_req_local_work_add
io_cqring_wake() needs a barrier for the waitqueue_active() check. However,
in the case of io_req_local_work_add(), we call llist_add() first, which
implies an atomic. Hence we can replace smp_mb() with smp_mb__after_atomic().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/43983bc8bc507172adda7a0f00cab1aff09fd238.1665018309.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
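As an aside, the ordering the message relies on can be illustrated with a small
kernel-style sketch. This is not the io_uring code itself: struct example,
example_add_work() and example_wait_for_work() are hypothetical names and the
snippet is an illustration rather than a buildable module, but the primitives
used (llist_add(), smp_mb__after_atomic(), waitqueue_active(), prepare_to_wait())
are the real kernel APIs.

#include <linux/atomic.h>
#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical state; stands in for the io_ring_ctx fields involved. */
struct example {
	struct llist_head work_llist;
	wait_queue_head_t cq_wait;
};

/* Waker side: publish a work item, then decide whether anyone must be woken. */
static void example_add_work(struct example *ex, struct llist_node *node)
{
	/* llist_add() returns true only if the list was empty before, so the
	 * first adder performs the wakeup and later adders can bail out. */
	if (!llist_add(node, &ex->work_llist))
		return;

	/*
	 * llist_add() is an atomic RMW (a cmpxchg loop), so a full smp_mb()
	 * is not required here; smp_mb__after_atomic() is enough to order
	 * the list update against the waitqueue_active() load below.
	 * Without that ordering the waker can miss a task that has just
	 * gone to sleep.
	 */
	smp_mb__after_atomic();

	if (waitqueue_active(&ex->cq_wait))
		wake_up_all(&ex->cq_wait);
}

/* Waiter side: queue ourselves first, then re-check for pending work. */
static void example_wait_for_work(struct example *ex)
{
	DEFINE_WAIT(wait);

	/* prepare_to_wait() puts us on the waitqueue and contains the
	 * barrier that pairs with the waker's smp_mb__after_atomic(). */
	prepare_to_wait(&ex->cq_wait, &wait, TASK_INTERRUPTIBLE);
	if (llist_empty(&ex->work_llist))
		schedule();
	finish_wait(&ex->cq_wait, &wait);
}

In the patch itself the same reasoning leads to splitting the helper:
__io_cqring_wake() documents that the caller must provide the barrier, while
the io_cqring_wake() wrapper keeps the full smp_mb() for callers that have not
already performed an atomic operation.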
Diffstat (limited to 'io_uring')
-rw-r--r--    io_uring/io_uring.c    5
-rw-r--r--    io_uring/io_uring.h    11
2 files changed, 12 insertions, 4 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b12ec6b5a4640b..12870cd7cb07cd 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1106,6 +1106,8 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
 		return;
+	/* need it for the following io_cqring_wake() */
+	smp_mb__after_atomic();
 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
 		io_move_task_work_from_local(ctx);
@@ -1117,8 +1119,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	if (ctx->has_evfd)
 		io_eventfd_signal(ctx);
-	io_cqring_wake(ctx);
-
+	__io_cqring_wake(ctx);
 }
 static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 48ce2348c8c1e8..47d4cad1e9c468 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -203,17 +203,24 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
-static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+/* requires smp_mb() prior, see wq_has_sleeper() */
+static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
 	 * io_should_wake() handle the termination of the loop and only
 	 * wake as many waiters as we need to.
 	 */
-	if (wq_has_sleeper(&ctx->cq_wait))
+	if (waitqueue_active(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
 }
+static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+{
+	smp_mb();
+	__io_cqring_wake(ctx);
+}
+
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;