From 42c0905f0cac9a86d2cb8138665a6d62ea607078 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 2 Feb 2024 10:06:38 -0700
Subject: io_uring: cleanup handle_tw_list() calling convention

Now that we don't loop around task_work anymore, there's no point in
maintaining the ring and locked state outside of handle_tw_list(). Get
rid of passing in those pointers (and pointers to pointers) and just
do the management internally in handle_tw_list().

Signed-off-by: Jens Axboe
---
 io_uring/io_uring.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d31e8b110de91..0b1a065a21c18 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1173,10 +1173,10 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
 	percpu_ref_put(&ctx->refs);
 }
 
-static unsigned int handle_tw_list(struct llist_node *node,
-				   struct io_ring_ctx **ctx,
-				   struct io_tw_state *ts)
+static unsigned int handle_tw_list(struct llist_node *node)
 {
+	struct io_ring_ctx *ctx = NULL;
+	struct io_tw_state ts = { };
 	unsigned int count = 0;
 
 	do {
@@ -1184,25 +1184,26 @@ static unsigned int handle_tw_list(struct llist_node *node,
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 
-		if (req->ctx != *ctx) {
-			ctx_flush_and_put(*ctx, ts);
-			*ctx = req->ctx;
+		if (req->ctx != ctx) {
+			ctx_flush_and_put(ctx, &ts);
+			ctx = req->ctx;
 			/* if not contended, grab and improve batching */
-			ts->locked = mutex_trylock(&(*ctx)->uring_lock);
-			percpu_ref_get(&(*ctx)->refs);
+			ts.locked = mutex_trylock(&ctx->uring_lock);
+			percpu_ref_get(&ctx->refs);
 		}
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
-				req, ts);
+				req, &ts);
 		node = next;
 		count++;
 		if (unlikely(need_resched())) {
-			ctx_flush_and_put(*ctx, ts);
-			*ctx = NULL;
+			ctx_flush_and_put(ctx, &ts);
+			ctx = NULL;
 			cond_resched();
 		}
 	} while (node);
 
+	ctx_flush_and_put(ctx, &ts);
 	return count;
 }
 
@@ -1250,8 +1251,6 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 
 void tctx_task_work(struct callback_head *cb)
 {
-	struct io_tw_state ts = {};
-	struct io_ring_ctx *ctx = NULL;
 	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
 						  task_work);
 	struct llist_node *node;
@@ -1264,9 +1263,7 @@ void tctx_task_work(struct callback_head *cb)
 
 	node = llist_del_all(&tctx->task_list);
 	if (node)
-		count = handle_tw_list(llist_reverse_order(node), &ctx, &ts);
-
-	ctx_flush_and_put(ctx, &ts);
+		count = handle_tw_list(llist_reverse_order(node));
 
 	/* relaxed read is enough as only the task itself sets ->in_cancel */
 	if (unlikely(atomic_read(&tctx->in_cancel)))
--
cgit 1.2.3-korg
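
For readers outside the kernel tree, the sketch below is a minimal userspace
model of the refactoring pattern this patch applies: batching state that was
threaded through the caller via pointer-to-pointer now lives entirely inside
the list handler, which also takes over the final flush. All toy_* names are
invented stand-ins for illustration, not the real io_uring types or helpers.

/*
 * Toy model of the calling-convention cleanup (illustrative only,
 * not kernel code): the handler owns its own "current ctx" batch
 * instead of having the caller keep it alive across calls.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_ctx {
	int id;
};

struct toy_node {
	struct toy_ctx *ctx;
	struct toy_node *next;
};

/* Stand-in for ctx_flush_and_put(): drop the current batch, if any. */
static void toy_flush_and_put(struct toy_ctx *ctx)
{
	if (ctx)
		printf("flush ctx %d\n", ctx->id);
}

/*
 * Before the cleanup this would have taken (node, struct toy_ctx **ctx)
 * so a looping caller could carry the batch across calls. With exactly
 * one call per invocation, the state can simply live here. The caller
 * guarantees node is non-NULL, as in the kernel version.
 */
static unsigned int toy_handle_list(struct toy_node *node)
{
	struct toy_ctx *ctx = NULL;	/* was **ctx threaded from the caller */
	unsigned int count = 0;

	do {
		struct toy_node *next = node->next;

		/* New ctx? Flush the old batch and start a new one. */
		if (node->ctx != ctx) {
			toy_flush_and_put(ctx);
			ctx = node->ctx;
			printf("grab ctx %d\n", ctx->id);
		}
		printf("handle node on ctx %d\n", ctx->id);
		node = next;
		count++;
	} while (node);

	/* The final flush moves in here too; the caller no longer does it. */
	toy_flush_and_put(ctx);
	return count;
}

int main(void)
{
	struct toy_ctx a = { .id = 1 }, b = { .id = 2 };
	struct toy_node n3 = { .ctx = &b, .next = NULL };
	struct toy_node n2 = { .ctx = &a, .next = &n3 };
	struct toy_node n1 = { .ctx = &a, .next = &n2 };

	printf("handled %u nodes\n", toy_handle_list(&n1));
	return 0;
}

Since tctx_task_work() now makes exactly one call per invocation, there is
no state to carry between calls, so the caller shrinks to a single call and
the handler's signature loses both extra parameters.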