author Hannes Reinecke <hare@suse.de> 2023-03-15 09:16:55 +0100
committer Hannes Reinecke <hare@suse.de> 2023-03-21 12:59:10 +0100
commit 5c13b43fa2c503188a6d668b0dec6a22078329a2 (patch)
tree c96e2a0357a6aac33fdd28cd8e71a40a5f82350b
parent b7b332e027a121cb65de11d8659bc47e5a1ab749 (diff)
download scsi-devel-tls-netlink.v7.tar.gz
nvmet-tcp: peek icreq before starting TLS (tls-netlink.v7)
Incoming connections might be either 'normal' NVMe-TCP connections starting with an icreq or TLS handshakes. To ensure that 'normal' connections can still be handled, peek the first packet and only start the TLS handshake if it is not an icreq.

Signed-off-by: Hannes Reinecke <hare@suse.de>
-rw-r--r-- drivers/nvme/target/tcp.c | 60
1 file changed, 58 insertions(+), 2 deletions(-)
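
The core of the change is a peek-then-decide step: look at the first bytes on the connection without consuming them, and only start the TLS handshake if they do not form a valid icreq. As a rough illustration of the idea, here is a minimal userspace-style sketch (not the kernel code; 'fd', the struct and enum names are made up for this example, while the 8-byte common PDU header and the 128-byte icreq size follow the NVMe/TCP spec):

#include <endian.h>
#include <stdint.h>
#include <sys/socket.h>

struct nvme_tcp_hdr_sketch {		/* common NVMe/TCP PDU header, 8 bytes */
	uint8_t  type;			/* 0x00 == icreq */
	uint8_t  flags;
	uint8_t  hlen;			/* header length */
	uint8_t  pdo;			/* PDU data offset */
	uint32_t plen;			/* total PDU length, little endian */
};

enum peek_result { PEEK_ICREQ, PEEK_TLS, PEEK_UNKNOWN, PEEK_AGAIN, PEEK_ERR };

static enum peek_result peek_first_pdu(int fd)
{
	struct nvme_tcp_hdr_sketch hdr;
	ssize_t len;

	/* MSG_PEEK leaves the bytes queued for whoever reads them for real */
	len = recv(fd, &hdr, sizeof(hdr), MSG_PEEK);
	if (len < 0)
		return PEEK_ERR;
	if ((size_t)len < sizeof(hdr))
		return PEEK_AGAIN;	/* not enough data yet, try again later */

	/* a well-formed icreq has type 0 and hlen == plen == 128 */
	if (hdr.type == 0x00 && hdr.hlen == 128 && le32toh(hdr.plen) == 128)
		return PEEK_ICREQ;

	/* 0x16 is the TLS record type of a handshake (ClientHello) */
	if (hdr.type == 0x16)
		return PEEK_TLS;

	return PEEK_UNKNOWN;
}

A caller would start a TLS handshake only when the peek does not find an icreq, which mirrors the new nvmet_tcp_try_peek_pdu() check in nvmet_tcp_alloc_queue() below.
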
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index a69647fb2c81e..a328a303c2be7 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1105,6 +1105,61 @@ static inline bool nvmet_tcp_pdu_valid(u8 type)
return false;
}
+static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ int len;
+ struct kvec iov = {
+ .iov_base = (u8 *)&queue->pdu + queue->offset,
+ .iov_len = sizeof(struct nvme_tcp_hdr),
+ };
+ char cbuf[CMSG_LEN(sizeof(char))] = {};
+ unsigned char ctype;
+ struct cmsghdr *cmsg;
+ struct msghdr msg = {
+ .msg_control = cbuf,
+ .msg_controllen = sizeof(cbuf),
+ .msg_flags = MSG_PEEK,
+ };
+
+ len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(len < 0)) {
+ pr_debug("queue %d peek error %d\n",
+ queue->idx, len);
+ return len;
+ }
+
+ cmsg = (struct cmsghdr *)cbuf;
+ if (CMSG_OK(&msg, cmsg) &&
+ cmsg->cmsg_level == SOL_TLS &&
+ cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
+ ctype = *((unsigned char *)CMSG_DATA(cmsg));
+ if (ctype != TLS_RECORD_TYPE_DATA) {
+ pr_err("queue %d unhandled TLS record %d\n",
+ queue->idx, ctype);
+ return -ENOTCONN;
+ }
+ }
+
+ if (len < sizeof(struct nvme_tcp_hdr)) {
+ pr_debug("queue %d short read, %d bytes missing\n",
+ queue->idx, (int)iov.iov_len - len);
+ return -EAGAIN;
+ }
+ pr_debug("queue %d hdr type %d hlen %d plen %d size %d\n",
+ queue->idx, hdr->type, hdr->hlen, hdr->plen,
+ (int)sizeof(struct nvme_tcp_icreq_pdu));
+ if (hdr->type == nvme_tcp_icreq &&
+ hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
+ hdr->plen == sizeof(struct nvme_tcp_icreq_pdu)) {
+ pr_debug("queue %d icreq detected\n",
+ queue->idx);
+ return len;
+ }
+ return 0;
+}
+
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
@@ -1879,8 +1934,9 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
nvmet_tcp_save_tls_callbacks(queue);
- if (!nvmet_tcp_tls_handshake(queue))
- return;
+ if (!nvmet_tcp_try_peek_pdu(queue))
+ if (!nvmet_tcp_tls_handshake(queue))
+ return;
nvmet_tcp_restore_tls_callbacks(queue);
/*