author		Jens Axboe <axboe@kernel.dk>	2024-01-28 20:11:55 -0700
committer	Jens Axboe <axboe@kernel.dk>	2024-02-08 13:27:06 -0700
commit		521223d7c229f83915619f888c99e952f24dc39f (patch)
tree		b19885749949ac462562ebc474b8964edc85331f /io_uring
parent		4bcb982cce74e18155fba0d97394ca9634e0d8f0 (diff)
download	linux-521223d7c229f83915619f888c99e952f24dc39f.tar.gz
io_uring/cancel: don't default to setting req->work.cancel_seq
Just leave it unset by default, avoiding dipping into the last cacheline (which is otherwise untouched) for the fast path of using poll to drive networked traffic. Add a flag that tells us if the sequence is valid or not, and then we can defer actually assigning the flag and sequence until someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
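The change boils down to a simple pattern: leave the cold cancel_seq field alone on the fast path, and let a per-request flag record whether the sequence has ever been assigned, so the first cancelation pass that visits the request sets both the flag and the value. The following is a minimal, self-contained sketch of that flag-gated, lazily assigned sequence match; the struct, flag, and function names are illustrative stand-ins, not the io_uring definitions.

/*
 * Standalone illustration of the "valid flag + deferred sequence" idea.
 * struct request, RF_CANCEL_SEQ and cancel_match_sequence() are made-up
 * names; they only mirror the shape of the kernel helper below.
 */
#include <stdbool.h>
#include <stdio.h>

#define RF_CANCEL_SEQ	(1u << 0)	/* set once cancel_seq holds a real value */

struct request {
	unsigned int flags;
	int cancel_seq;		/* only meaningful when RF_CANCEL_SEQ is set */
};

/* Return true if this request was already visited by cancelation run 'seq'. */
static bool cancel_match_sequence(struct request *req, int seq)
{
	if ((req->flags & RF_CANCEL_SEQ) && req->cancel_seq == seq)
		return true;

	/* First visit in this run: mark the field valid and record the run. */
	req->flags |= RF_CANCEL_SEQ;
	req->cancel_seq = seq;
	return false;
}

int main(void)
{
	struct request r = { 0 };	/* cancel_seq stays unset on the fast path */

	printf("%d\n", cancel_match_sequence(&r, 1));	/* 0: first visit */
	printf("%d\n", cancel_match_sequence(&r, 1));	/* 1: already seen */
	printf("%d\n", cancel_match_sequence(&r, 2));	/* 0: new cancelation run */
	return 0;
}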
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/cancel.c	 3
-rw-r--r--	io_uring/cancel.h	10
-rw-r--r--	io_uring/io_uring.c	 1
-rw-r--r--	io_uring/poll.c		 6
4 files changed, 12 insertions, 8 deletions
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 8a8b07dfc444cd..acfcdd7f059afd 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
 		return false;
 	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-		if (cd->seq == req->work.cancel_seq)
+		if (io_cancel_match_sequence(req, cd->seq))
 			return false;
-		req->work.cancel_seq = cd->seq;
 	}
 
 	return true;
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index c0a8e7c520b6d6..76b32e65c03cd7 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b8ca907b77eb90..fd552b260eefb6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 
 	req->work.list.next = NULL;
 	req->work.flags = 0;
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b702e4..c2b0a2d0762b2d 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;