author		Jens Axboe <axboe@kernel.dk>	2023-11-06 07:43:16 -0700
committer	Jens Axboe <axboe@kernel.dk>	2023-11-06 07:43:16 -0700
commit		f688944cfb810986c626cb13d95bc666e5c8a36c (patch)
tree		d9c01b7c5182cf3a896f5e320834cbad5ae33671 /io_uring
parent		0e984ec88da9747549227900e5215c5e6a1b65ae (diff)
download	linux-f688944cfb810986c626cb13d95bc666e5c8a36c.tar.gz
io_uring/rw: add separate prep handler for fixed read/write
Rather than sprinkle opcode checks in the generic read/write prep handler, have a separate prep handler for the fixed read/write operations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
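For context, IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED operate on buffers registered with the ring up front and selected via sqe->buf_index; that index is exactly what the new prep handler validates. A minimal userspace sketch using liburing (the file name, buffer size, and trimmed error handling are illustrative, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/*
	 * Register one fixed buffer. Its slot (index 0) is what
	 * sqe->buf_index refers to, and what the kernel-side prep
	 * checks against the number of registered buffers.
	 */
	iov.iov_base = malloc(4096);
	iov.iov_len = 4096;
	if (io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* hypothetical input file */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* last argument is buf_index: slot 0, the buffer registered above */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, 4096, 0, 0);

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret)
		printf("read_fixed: res=%d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

Registration pins the buffer once, so the per-I/O prep path only has to translate buf_index into the registered-buffer descriptor, which is the lookup this patch moves out of the generic handler and into io_prep_rw_fixed().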
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/opdef.c	4
-rw-r--r--	io_uring/rw.c	30
-rw-r--r--	io_uring/rw.h	1
3 files changed, 21 insertions, 14 deletions
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 0521a26bc6cd17..799db44283c7e3 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_read,
},
[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_rw_fixed,
.issue = io_write,
},
[IORING_OP_POLL_ADD] = {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 63d343bae76229..9e3e56b74e35ea 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
/* used for fixed read/write too - just read unconditionally */
req->buf_index = READ_ONCE(sqe->buf_index);
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
- }
-
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
ret = ioprio_check_cap(ioprio);
@@ -131,6 +119,24 @@ int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ u16 index;
+ int ret;
+
+ ret = io_prep_rw(req, sqe);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+ return -EFAULT;
+ index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+ req->imu = ctx->user_bufs[index];
+ io_req_set_rsrc_node(req, ctx, 0);
+ return 0;
+}
+
/*
* Multishot read is prepared just like a normal read/write request, only
* difference is that we set the MULTISHOT flag.
diff --git a/io_uring/rw.h b/io_uring/rw.h
index 32aa7937513a58..f9e89b4fe4da91 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -17,6 +17,7 @@ struct io_async_rw {
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read(struct io_kiocb *req, unsigned int issue_flags);
int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags);
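A side note on failure behavior visible in the new handler: the buf_index bounds check runs at prep time, so an out-of-range index fails the request with -EFAULT, delivered through its CQE. Extending the sketch above (hypothetical values; assumes a single buffer registered at slot 0):

	sqe = io_uring_get_sqe(&ring);
	/* only slot 0 is registered; slot 1 trips the
	 * req->buf_index >= ctx->nr_user_bufs check in io_prep_rw_fixed() */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, 4096, 0, 1);
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	/* expect: cqe->res == -EFAULT */
	io_uring_cqe_seen(&ring, cqe);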