author    Chuck Lever <chuck.lever@oracle.com>  2023-07-19 14:31:09 -0400
committer Chuck Lever <chuck.lever@oracle.com>  2023-08-29 17:45:22 -0400
commit    e18e157bb5c8c1cd8a9ba25acfdcf4f3035836f4 (patch)
tree      597d551dce6ba7f69a6a2107330db454b9327ad2 /net/sunrpc
parent    2eb2b93581813b74c7174961126f6ec38eadb5a7 (diff)
SUNRPC: Send RPC message on TCP with a single sock_sendmsg() call
There is now enough infrastructure in place to combine the stream
record marker into the bio_vec array used to send each outgoing RPC
message on TCP. The whole message can be sent more efficiently with
a single call to sock_sendmsg() using a bio_vec iterator.

Note that this also helps with RPC-with-TLS: the TLS implementation
can now clearly see where the upper-layer message boundaries are.
Before, it would send each component of the xdr_buf (record marker,
head, page payload, tail) as a separate TLS record.

Suggested-by: David Howells <dhowells@redhat.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
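For context, RPC over TCP frames each message with a 4-byte record
marker (RFC 5531: the high bit marks the last fragment, the low 31
bits carry the fragment length). Below is a minimal userspace sketch
of the same idea, one sendmsg() call covering both the marker and the
payload via a plain iovec array; the helper send_rpc_record() and its
parameters are illustrative, not part of the patch:

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <arpa/inet.h>

    /* Illustrative only: frame and send one RPC record with a single
     * sendmsg() call instead of one send per buffer. The kernel patch
     * achieves the same effect with a bio_vec iterator.
     */
    static ssize_t send_rpc_record(int sock, void *payload, size_t len)
    {
        /* RFC 5531 record marker: high bit = last fragment,
         * low 31 bits = fragment length, in network byte order.
         */
        uint32_t marker = htonl(0x80000000 | (uint32_t)len);
        struct iovec iov[2] = {
            { .iov_base = &marker, .iov_len = sizeof(marker) },
            { .iov_base = payload, .iov_len = len },
        };
        struct msghdr msg = {
            .msg_iov    = iov,
            .msg_iovlen = 2,
        };

        /* A production caller would loop on short sends. */
        return sendmsg(sock, &msg, 0);
    }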
Diffstat (limited to 'net/sunrpc')
 net/sunrpc/svcsock.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 90b1ab95c223e7..d4d816036c0409 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1213,31 +1213,30 @@ err_noclose:
 static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
 			   rpc_fraghdr marker, unsigned int *sentp)
 {
-	struct kvec rm = {
-		.iov_base	= &marker,
-		.iov_len	= sizeof(marker),
-	};
 	struct msghdr msg = {
-		.msg_flags	= MSG_MORE,
+		.msg_flags	= MSG_SPLICE_PAGES,
 	};
 	unsigned int count;
+	void *buf;
 	int ret;
 
 	*sentp = 0;
 
-	ret = kernel_sendmsg(svsk->sk_sock, &msg, &rm, 1, rm.iov_len);
-	if (ret < 0)
-		return ret;
-	*sentp += ret;
-	if (ret != rm.iov_len)
-		return -EAGAIN;
+	/* The stream record marker is copied into a temporary page
+	 * fragment buffer so that it can be included in rq_bvec.
+	 */
+	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
+			      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, &marker, sizeof(marker));
+	bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
 
-	count = xdr_buf_to_bvec(rqstp->rq_bvec, ARRAY_SIZE(rqstp->rq_bvec),
-				&rqstp->rq_res);
+	count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
+				ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);
 
-	msg.msg_flags = MSG_SPLICE_PAGES;
 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
-		      count, rqstp->rq_res.len);
+		      1 + count, sizeof(marker) + rqstp->rq_res.len);
 	ret = sock_sendmsg(svsk->sk_sock, &msg);
 	if (ret < 0)
 		return ret;
@@ -1616,6 +1615,7 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
 static void svc_sock_free(struct svc_xprt *xprt)
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+	struct page_frag_cache *pfc = &svsk->sk_frag_cache;
 	struct socket *sock = svsk->sk_sock;
 
 	trace_svcsock_free(svsk, sock);
@@ -1625,5 +1625,8 @@ static void svc_sock_free(struct svc_xprt *xprt)
 		sockfd_put(sock);
 	else
 		sock_release(sock);
+	if (pfc->va)
+		__page_frag_cache_drain(virt_to_head_page(pfc->va),
+					pfc->pagecnt_bias);
 	kfree(svsk);
 }
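For readability, here is the svc_tcp_sendmsg() send path as it looks
after the patch, reconstructed from the hunks above rather than taken
verbatim from the tree; code outside the hunks is elided:

    static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
    			   rpc_fraghdr marker, unsigned int *sentp)
    {
    	struct msghdr msg = {
    		.msg_flags	= MSG_SPLICE_PAGES,
    	};
    	unsigned int count;
    	void *buf;
    	int ret;

    	*sentp = 0;

    	/* Stage the record marker in a page fragment so it can occupy
    	 * the first slot of rq_bvec.
    	 */
    	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
    			      GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;
    	memcpy(buf, &marker, sizeof(marker));
    	bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));

    	/* Convert the response xdr_buf into the bio_vec slots that
    	 * follow the marker.
    	 */
    	count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
    				ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);

    	/* Marker plus message: one iterator, one sock_sendmsg() call. */
    	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
    		      1 + count, sizeof(marker) + rqstp->rq_res.len);
    	ret = sock_sendmsg(svsk->sk_sock, &msg);
    	if (ret < 0)
    		return ret;
    	/* ... remainder of the function is not shown in the hunk above. */
    }

Note the lifetime pairing: the marker buffer comes from the per-socket
sk_frag_cache, and the svc_sock_free() hunk drains that cache when the
transport is torn down.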