about summary refs log tree commit diff stats
path: root/io_uring/net.c
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2022-09-21 12:17:50 +0100
committerJens Axboe <axboe@kernel.dk>2022-09-21 13:15:02 -0600
commit6ae61b7aa2c758ce07347ebfa9c79b6f208098d5 (patch)
tree391404b215bf20c6ad86275d9a0f782b9a6e82d1 /io_uring/net.c
parent5693bcce892d7b8b15a7a92b011d3d40a023b53c (diff)
downloadlinux-6ae61b7aa2c758ce07347ebfa9c79b6f208098d5.tar.gz
io_uring/net: refactor io_setup_async_addr
Instead of passing the right address into io_setup_async_addr() only
specify local on-stack storage and let the function infer where to grab
it from. It optimises out one local variable we have to deal with.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6bfa9ab810d776853eb26ed59301e2536c3a5471.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--io_uring/net.c18
1 files changed, 9 insertions, 9 deletions
diff --git a/io_uring/net.c b/io_uring/net.c
index 8d90f8eeb2d0d..021ca2edf44a1 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -196,17 +196,18 @@ int io_sendzc_prep_async(struct io_kiocb *req)
}
static int io_setup_async_addr(struct io_kiocb *req,
- struct sockaddr_storage *addr,
+ struct sockaddr_storage *addr_storage,
unsigned int issue_flags)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
- if (!addr || req_has_async_data(req))
+ if (!sr->addr || req_has_async_data(req))
return -EAGAIN;
io = io_msg_alloc_async(req, issue_flags);
if (!io)
return -ENOMEM;
- memcpy(&io->addr, addr, sizeof(io->addr));
+ memcpy(&io->addr, addr_storage, sizeof(io->addr));
return -EAGAIN;
}
@@ -1000,7 +1001,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
- struct sockaddr_storage __address, *addr = NULL;
+ struct sockaddr_storage __address;
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct msghdr msg;
struct iovec iov;
@@ -1021,20 +1022,19 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) {
struct io_async_msghdr *io = req->async_data;
- msg.msg_name = addr = &io->addr;
+ msg.msg_name = &io->addr;
} else {
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
if (unlikely(ret < 0))
return ret;
msg.msg_name = (struct sockaddr *)&__address;
- addr = &__address;
}
msg.msg_namelen = zc->addr_len;
}
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_addr(req, addr, issue_flags);
+ return io_setup_async_addr(req, &__address, issue_flags);
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
@@ -1065,14 +1065,14 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return io_setup_async_addr(req, addr, issue_flags);
+ return io_setup_async_addr(req, &__address, issue_flags);
if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_addr(req, addr, issue_flags);
+ return io_setup_async_addr(req, &__address, issue_flags);
}
if (ret < 0 && !zc->done_io)
zc->notif->flags |= REQ_F_CQE_SKIP;