author		Jens Axboe <axboe@kernel.dk>	2024-01-31 10:39:05 -0700
committer	Jens Axboe <axboe@kernel.dk>	2024-02-08 13:27:06 -0700
commit		670d9d3df8808b39430ade7a04b38363971167f5 (patch)
tree		07b3c3fa4bde3da3e4899b1667e606ae659e3353 /io_uring
parent		170539bdf1094e6e43e9aa86bf2dcaff0857df41 (diff)
download	linux-670d9d3df8808b39430ade7a04b38363971167f5.tar.gz
io_uring: remove next io_kiocb fetch in task_work running
We just reversed the task_work list, and that will have touched the requests as well. Just get rid of this optimization, as it should not make a difference anymore.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
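For context (not part of the commit), here is a minimal user-space C sketch of the reasoning in the message above: a reversal pass over a singly linked list already dereferences every node, so a later prefetch of the next entry in the run loop buys nothing. The names tw_node, tw_reverse and tw_run are hypothetical and only illustrate the pattern; they are not the kernel's types or functions.

/*
 * Minimal sketch, assuming a simplified singly linked task_work list.
 * Reversing the list touches every node, so the nodes are already
 * cache-warm when the run loop walks the reversed list afterwards.
 */
#include <stdio.h>

struct tw_node {			/* hypothetical stand-in for io_kiocb's llist node */
	struct tw_node *next;
	int id;
};

/* Reverse the list in place; every node is dereferenced on this pass. */
static struct tw_node *tw_reverse(struct tw_node *head)
{
	struct tw_node *prev = NULL;

	while (head) {
		struct tw_node *next = head->next;	/* node loaded here */

		head->next = prev;
		prev = head;
		head = next;
	}
	return prev;
}

/* Run loop after reversal: no explicit prefetch of 'next' is needed. */
static void tw_run(struct tw_node *head)
{
	while (head) {
		struct tw_node *next = head->next;

		printf("running tw %d\n", head->id);
		head = next;
	}
}

int main(void)
{
	struct tw_node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

	tw_run(tw_reverse(&a));		/* prints 3, 2, 1 */
	return 0;
}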
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	3
1 file changed, 0 insertions(+), 3 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4678d9ec810be7..007cef9738eeea 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1184,8 +1184,6 @@ static unsigned int handle_tw_list(struct llist_node *node,
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 
-		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-
 		if (req->ctx != *ctx) {
 			ctx_flush_and_put(*ctx, ts);
 			*ctx = req->ctx;
@@ -1408,7 +1406,6 @@ again:
 		struct llist_node *next = node->next;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
-		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
 				req, ts);