author		Hannes Reinecke <hare@suse.de>	2023-03-15 09:16:55 +0100
committer	Hannes Reinecke <hare@suse.de>	2023-08-14 13:13:21 +0200
commit		ce525e34ae9d3f5ed12f23caea57c0aa2e8359ee (patch)
tree		c26a11d898c1b2ebad1743c5d17e31091dc6f607
parent		1cd1bfe171d7938cbaa2ada9c7cf8e1ceda40e87 (diff)
download	nvme-tls.v13.tar.gz

nvmet-tcp: peek icreq before starting TLS (tls.v13)
Incoming connections might be either 'normal' NVMe-TCP connections
starting with an icreq PDU, or TLS handshakes. To ensure that 'normal'
connections can still be handled, we need to peek at the first packet
and only start the TLS handshake if it is not an icreq. With that we
can lift the restriction of always setting TREQ to 'required' when
TLS 1.3 is enabled.

Signed-off-by: Hannes Reinecke <hare@suse.de>
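The key mechanism is MSG_PEEK: it copies the first queued bytes without
consuming them, so whichever receive path runs afterwards (plain NVMe-TCP
processing or the TLS handshake upcall) still sees the complete stream. A
minimal user-space sketch of that idea follows; the header layout and the
icreq constants mirror include/linux/nvme-tcp.h, but peek_is_icreq() and
the plain recv() call are hypothetical illustrations, not part of this
patch, which uses kernel_recvmsg() and additionally validates the TLS
record type via a control message.

	/*
	 * Illustrative only: user-space sketch of icreq-vs-TLS detection
	 * with MSG_PEEK. The kernel code in this patch uses
	 * kernel_recvmsg() instead and also checks the TLS record type.
	 */
	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/socket.h>

	/* Common NVMe-TCP PDU header, mirroring include/linux/nvme-tcp.h */
	struct nvme_tcp_hdr {
		uint8_t  type;	/* nvme_tcp_icreq is PDU type 0x0 */
		uint8_t  flags;
		uint8_t  hlen;	/* PDU header length */
		uint8_t  pdo;	/* PDU data offset */
		uint32_t plen;	/* total PDU length, little-endian on the wire */
	};

	#define NVME_TCP_ICREQ		0x0
	#define NVME_TCP_ICREQ_PDU_LEN	128	/* sizeof(struct nvme_tcp_icreq_pdu) */

	/*
	 * Returns 1 if the first queued bytes look like an icreq, 0 if not
	 * (e.g. a TLS ClientHello, whose first byte is the record type
	 * 0x16), and -1 on error or short peek. Assumes a little-endian
	 * host for the plen comparison.
	 */
	static int peek_is_icreq(int fd)
	{
		struct nvme_tcp_hdr hdr;
		ssize_t len;

		/* MSG_PEEK copies the data without removing it from the
		 * socket, so the path chosen next still reads the full
		 * PDU or TLS record. */
		len = recv(fd, &hdr, sizeof(hdr), MSG_PEEK);
		if (len < (ssize_t)sizeof(hdr))
			return -1;

		return hdr.type == NVME_TCP_ICREQ &&
		       hdr.hlen == NVME_TCP_ICREQ_PDU_LEN &&
		       hdr.plen == NVME_TCP_ICREQ_PDU_LEN;
	}

Since a TLS ClientHello begins with record-type byte 0x16, it can never
match the icreq PDU type 0x0, so one peeked header is enough to tell the
two apart; a mismatch costs only a single extra syscall before the socket
is handed to the TLS layer.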
-rw-r--r--	drivers/nvme/target/configfs.c	17
-rw-r--r--	drivers/nvme/target/nvmet.h	10
-rw-r--r--	drivers/nvme/target/tcp.c	61
3 files changed, 66 insertions, 22 deletions
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ad1fb32c738797..56ed6ce4d4d5fb 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -175,11 +175,6 @@ static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
 	return snprintf(page, PAGE_SIZE, "\n");
 }
 
-static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
-{
-	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
-}
-
 static ssize_t nvmet_addr_treq_store(struct config_item *item,
 		const char *page, size_t count)
 {
@@ -377,7 +372,6 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	struct nvmet_port *port = to_nvmet_port(item);
-	u8 treq = nvmet_port_disc_addr_treq_mask(port);
 	u8 sectype;
 	int i;
 
@@ -410,17 +404,6 @@ found:
 	}
 
 	nvmet_port_init_tsas_tcp(port, sectype);
-	/*
-	 * The TLS implementation currently does not support
-	 * secure concatenation, so TREQ is always set to 'required'
-	 * if TLS is enabled.
-	 */
-	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
-		treq |= NVMF_TREQ_REQUIRED;
-	} else {
-		treq |= NVMF_TREQ_NOT_SPECIFIED;
-	}
-	port->disc_addr.treq = treq;
 	return count;
 }
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7f9ae53c1df58b..c47bab3281e01c 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -179,6 +179,16 @@ static inline struct nvmet_port *ana_groups_to_port(
 			ana_groups_group);
 }
 
+static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
+{
+	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
+}
+
+static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
+{
+	return nvmet_port_disc_addr_treq_mask(port) == NVMF_TREQ_REQUIRED;
+}
+
 struct nvmet_ctrl {
 	struct nvmet_subsys *subsys;
 	struct nvmet_sq **sqs;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ded985efdb66a9..2b60e6f23cc942 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1156,6 +1156,54 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
 	return ret;
 }
 
+static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
+{
+	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+	int len, ret;
+	struct kvec iov = {
+		.iov_base = (u8 *)&queue->pdu + queue->offset,
+		.iov_len = sizeof(struct nvme_tcp_hdr),
+	};
+	char cbuf[CMSG_LEN(sizeof(char))] = {};
+	struct msghdr msg = {
+		.msg_control = cbuf,
+		.msg_controllen = sizeof(cbuf),
+		.msg_flags = MSG_PEEK,
+	};
+
+	if (nvmet_port_secure_channel_required(queue->port->nport))
+		return 0;
+
+	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+			iov.iov_len, msg.msg_flags);
+	if (unlikely(len < 0)) {
+		pr_debug("queue %d: peek error %d\n",
+			 queue->idx, len);
+		return len;
+	}
+
+	ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
+	if (ret < 0)
+		return ret;
+
+	if (len < sizeof(struct nvme_tcp_hdr)) {
+		pr_debug("queue %d: short read, %d bytes missing\n",
+			 queue->idx, (int)iov.iov_len - len);
+		return -EAGAIN;
+	}
+	pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
+		 queue->idx, hdr->type, hdr->hlen, hdr->plen,
+		 (int)sizeof(struct nvme_tcp_icreq_pdu));
+	if (hdr->type == nvme_tcp_icreq &&
+	    hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
+	    hdr->plen == sizeof(struct nvme_tcp_icreq_pdu)) {
+		pr_debug("queue %d: icreq detected\n",
+			 queue->idx);
+		return len;
+	}
+	return 0;
+}
+
 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
 {
 	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
@@ -1868,11 +1916,14 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 		sk->sk_user_data = NULL;
 		sk->sk_data_ready = port->data_ready;
 		read_unlock_bh(&sk->sk_callback_lock);
-		if (!nvmet_tcp_tls_handshake(queue))
-			return;
-
-		/* TLS handshake failed, terminate the connection */
-		goto out_destroy_sq;
+		if (!nvmet_tcp_try_peek_pdu(queue)) {
+			if (!nvmet_tcp_tls_handshake(queue))
+				return;
+			/* TLS handshake failed, terminate the connection */
+			goto out_destroy_sq;
+		}
+		/* Not a TLS connection, continue with normal processing */
+		queue->state = NVMET_TCP_Q_CONNECTING;
 	}
 #endif