author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /net/tls
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/tls')
-rw-r--r--   net/tls/Kconfig                   39
-rw-r--r--   net/tls/Makefile                  13
-rw-r--r--   net/tls/tls.h                    328
-rw-r--r--   net/tls/tls_device.c            1487
-rw-r--r--   net/tls/tls_device_fallback.c    513
-rw-r--r--   net/tls/tls_main.c              1246
-rw-r--r--   net/tls/tls_proc.c                56
-rw-r--r--   net/tls/tls_strp.c               633
-rw-r--r--   net/tls/tls_sw.c                2818
-rw-r--r--   net/tls/tls_toe.c                141
-rw-r--r--   net/tls/trace.c                   10
-rw-r--r--   net/tls/trace.h                  202
12 files changed, 7486 insertions, 0 deletions
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
new file mode 100644
index 000000000..0cdc1f7b6
--- /dev/null
+++ b/net/tls/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TLS configuration
+#
+config TLS
+ tristate "Transport Layer Security support"
+ depends on INET
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select STREAM_PARSER
+ select NET_SOCK_MSG
+ default n
+ help
+ Enable kernel support for TLS protocol. This allows symmetric
+ encryption handling of the TLS protocol to be done in-kernel.
+
+ If unsure, say N.
+
+config TLS_DEVICE
+ bool "Transport Layer Security HW offload"
+ depends on TLS
+ select SOCK_VALIDATE_XMIT
+ select SOCK_RX_QUEUE_MAPPING
+ default n
+ help
+ Enable kernel support for HW offload of the TLS protocol.
+
+ If unsure, say N.
+
+config TLS_TOE
+ bool "Transport Layer Security TCP stack bypass"
+ depends on TLS
+ default n
+ help
+ Enable kernel support for legacy HW offload of the TLS protocol,
+ which is incompatible with the Linux networking stack semantics.
+
+ If unsure, say N.
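
The CONFIG_TLS help text above describes the kernel taking over symmetric record encryption once the TLS handshake has completed in userspace. For orientation only, here is a minimal userspace sketch of enabling TX offload on a connected TCP socket after a TLS 1.2 AES-GCM-128 handshake; it is not part of this patch, the enable_ktls_tx() helper and its key-material arguments are placeholders, and error handling is reduced to the bare minimum.

#include <linux/tls.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* value from include/linux/socket.h */
#endif

/* Sketch only: assumes "sock" is a connected TCP socket whose TLS 1.2
 * handshake already ran in userspace, and that the caller passes the
 * negotiated AES-GCM-128 key material.
 */
static int enable_ktls_tx(int sock, const unsigned char *key,
			  const unsigned char *iv, const unsigned char *salt,
			  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci = {
		.info.version = TLS_1_2_VERSION,
		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
	};

	/* Attach the kernel TLS ULP to the TCP socket. */
	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	/* Hand the session keys over to the kernel. */
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* From here on, plain send() data leaves the socket as TLS records. */
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
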
diff --git a/net/tls/Makefile b/net/tls/Makefile
new file mode 100644
index 000000000..e41c80048
--- /dev/null
+++ b/net/tls/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the TLS subsystem.
+#
+
+CFLAGS_trace.o := -I$(src)
+
+obj-$(CONFIG_TLS) += tls.o
+
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
+
+tls-$(CONFIG_TLS_TOE) += tls_toe.o
+tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
diff --git a/net/tls/tls.h b/net/tls/tls.h
new file mode 100644
index 000000000..0672acab2
--- /dev/null
+++ b/net/tls/tls.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _TLS_INT_H
+#define _TLS_INT_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/skmsg.h>
+#include <net/tls.h>
+
+#define TLS_PAGE_ORDER (min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER, \
+ TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))
+
+#define __TLS_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_DEC_STATS(net, field) \
+ SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+
+/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
+ * allocated or mapped for each TLS record. After encryption, the records are
+ * stored in a linked list.
+ */
+struct tls_rec {
+ struct list_head list;
+ int tx_ready;
+ int tx_flags;
+
+ struct sk_msg msg_plaintext;
+ struct sk_msg msg_encrypted;
+
+ /* AAD | msg_plaintext.sg.data | sg_tag */
+ struct scatterlist sg_aead_in[2];
+ /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
+ struct scatterlist sg_aead_out[2];
+
+ char content_type;
+ struct scatterlist sg_content_type;
+
+ struct sock *sk;
+
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ u8 iv_data[MAX_IV_SIZE];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[];
+};
+
+int __net_init tls_proc_init(struct net *net);
+void __net_exit tls_proc_fini(struct net *net);
+
+struct tls_context *tls_ctx_create(struct sock *sk);
+void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
+void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
+int wait_on_pending_writer(struct sock *sk, long *timeo);
+int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
+void tls_err_abort(struct sock *sk, int err);
+
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+void tls_sw_strparser_done(struct tls_context *tls_ctx);
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
+void tls_sw_release_resources_tx(struct sock *sk);
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
+void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
+int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len);
+bool tls_sw_sock_is_readable(struct sock *sk);
+ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+
+int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_device_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+int tls_tx_records(struct sock *sk, int flags);
+
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+
+int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type);
+int decrypt_skb(struct sock *sk, struct scatterlist *sgout);
+
+int tls_sw_fallback_init(struct sock *sk,
+ struct tls_offload_context_tx *offload_ctx,
+ struct tls_crypto_info *crypto_info);
+
+int tls_strp_dev_init(void);
+void tls_strp_dev_exit(void);
+
+void tls_strp_done(struct tls_strparser *strp);
+void tls_strp_stop(struct tls_strparser *strp);
+int tls_strp_init(struct tls_strparser *strp, struct sock *sk);
+void tls_strp_data_ready(struct tls_strparser *strp);
+
+void tls_strp_check_rcv(struct tls_strparser *strp);
+void tls_strp_msg_done(struct tls_strparser *strp);
+
+int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
+void tls_rx_msg_ready(struct tls_strparser *strp);
+
+void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
+int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
+struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
+int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
+
+static inline struct tls_msg *tls_msg(struct sk_buff *skb)
+{
+ struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
+
+ return &scb->tls;
+}
+
+static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
+{
+ DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len);
+ return ctx->strp.anchor;
+}
+
+static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
+{
+ return ctx->strp.msg_ready;
+}
+
+static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
+{
+ return ctx->strp.mixed_decrypted;
+}
+
+#ifdef CONFIG_TLS_DEVICE
+int tls_device_init(void);
+void tls_device_cleanup(void);
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+void tls_device_free_resources_tx(struct sock *sk);
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx);
+#else
+static inline int tls_device_init(void) { return 0; }
+static inline void tls_device_cleanup(void) {}
+
+static inline int
+tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_free_resources_tx(struct sock *sk) {}
+
+static inline int
+tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
+static inline void
+tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
+
+static inline int
+tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
+{
+ return 0;
+}
+#endif
+
+int tls_push_sg(struct sock *sk, struct tls_context *ctx,
+ struct scatterlist *sg, u16 first_offset,
+ int flags);
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags);
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
+
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
+{
+ return !!ctx->partially_sent_record;
+}
+
+static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
+{
+ return tls_ctx->pending_open_record_frags;
+}
+
+static inline bool tls_bigint_increment(unsigned char *seq, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ ++seq[i];
+ if (seq[i] != 0)
+ break;
+ }
+
+ return (i == -1);
+}
+
+static inline void tls_bigint_subtract(unsigned char *seq, int n)
+{
+ u64 rcd_sn;
+ __be64 *p;
+
+ BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
+
+ p = (__be64 *)seq;
+ rcd_sn = be64_to_cpu(*p);
+ *p = cpu_to_be64(rcd_sn - n);
+}
+
+static inline void
+tls_advance_record_sn(struct sock *sk, struct tls_prot_info *prot,
+ struct cipher_context *ctx)
+{
+ if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
+ tls_err_abort(sk, -EBADMSG);
+
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
+ tls_bigint_increment(ctx->iv + prot->salt_size,
+ prot->iv_size);
+}
+
+static inline void
+tls_xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
+{
+ int i;
+
+ if (prot->version == TLS_1_3_VERSION ||
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
+ for (i = 0; i < 8; i++)
+ iv[i + 4] ^= seq[i];
+ }
+}
+
+static inline void
+tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len,
+ unsigned char record_type)
+{
+ struct tls_prot_info *prot = &ctx->prot_info;
+ size_t pkt_len, iv_size = prot->iv_size;
+
+ pkt_len = plaintext_len + prot->tag_size;
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
+ pkt_len += iv_size;
+
+ memcpy(buf + TLS_NONCE_OFFSET,
+ ctx->tx.iv + prot->salt_size, iv_size);
+ }
+
+ /* we cover nonce explicit here as well, so buf should be of
+ * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
+ */
+ buf[0] = prot->version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
+ buf[1] = TLS_1_2_VERSION_MINOR;
+ buf[2] = TLS_1_2_VERSION_MAJOR;
+ /* we can use IV for nonce explicit according to spec */
+ buf[3] = pkt_len >> 8;
+ buf[4] = pkt_len & 0xFF;
+}
+
+static inline
+void tls_make_aad(char *buf, size_t size, char *record_sequence,
+ unsigned char record_type, struct tls_prot_info *prot)
+{
+ if (prot->version != TLS_1_3_VERSION) {
+ memcpy(buf, record_sequence, prot->rec_seq_size);
+ buf += 8;
+ } else {
+ size += prot->tag_size;
+ }
+
+ buf[0] = prot->version == TLS_1_3_VERSION ?
+ TLS_RECORD_TYPE_DATA : record_type;
+ buf[1] = TLS_1_2_VERSION_MAJOR;
+ buf[2] = TLS_1_2_VERSION_MINOR;
+ buf[3] = size >> 8;
+ buf[4] = size & 0xFF;
+}
+
+#endif
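
tls_fill_prepend() above writes the 5-byte TLS record header (and, for TLS 1.2 AES-GCM, an 8-byte explicit nonce) in front of each record, with the length field covering nonce, payload and tag. The following standalone sketch, which is not kernel code, reproduces that length math for a hypothetical 1024-byte application-data record under TLS 1.2 AES-GCM-128 (8-byte explicit nonce, 16-byte tag).

#include <stdint.h>
#include <stdio.h>

/* Standalone illustration of the header bytes tls_fill_prepend() produces:
 * wire length = explicit nonce (8) + plaintext + tag (16).
 */
int main(void)
{
	const unsigned int plaintext_len = 1024;	/* hypothetical record */
	const unsigned int iv_size = 8, tag_size = 16;
	unsigned int pkt_len = plaintext_len + tag_size + iv_size;
	uint8_t hdr[5];

	hdr[0] = 0x17;		/* TLS_RECORD_TYPE_DATA (application data) */
	hdr[1] = 0x03;		/* record version stays 0x0303 for ...     */
	hdr[2] = 0x03;		/* ... both TLS 1.2 and TLS 1.3            */
	hdr[3] = pkt_len >> 8;
	hdr[4] = pkt_len & 0xff;

	/* Prints "17 03 03 04 18": 0x0418 == 1048 bytes follow the header. */
	printf("%02x %02x %02x %02x %02x\n",
	       hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]);
	return 0;
}
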
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
new file mode 100644
index 000000000..184982788
--- /dev/null
+++ b/net/tls/tls_device.c
@@ -0,0 +1,1487 @@
+/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <crypto/aead.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/dst.h>
+#include <net/inet_connection_sock.h>
+#include <net/tcp.h>
+#include <net/tls.h>
+
+#include "tls.h"
+#include "trace.h"
+
+/* device_offload_lock is used to synchronize tls_dev_add
+ * against NETDEV_DOWN notifications.
+ */
+static DECLARE_RWSEM(device_offload_lock);
+
+static struct workqueue_struct *destruct_wq __read_mostly;
+
+static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
+static DEFINE_SPINLOCK(tls_device_lock);
+
+static struct page *dummy_page;
+
+static void tls_device_free_ctx(struct tls_context *ctx)
+{
+ if (ctx->tx_conf == TLS_HW) {
+ kfree(tls_offload_ctx_tx(ctx));
+ kfree(ctx->tx.rec_seq);
+ kfree(ctx->tx.iv);
+ }
+
+ if (ctx->rx_conf == TLS_HW)
+ kfree(tls_offload_ctx_rx(ctx));
+
+ tls_ctx_free(NULL, ctx);
+}
+
+static void tls_device_tx_del_task(struct work_struct *work)
+{
+ struct tls_offload_context_tx *offload_ctx =
+ container_of(work, struct tls_offload_context_tx, destruct_work);
+ struct tls_context *ctx = offload_ctx->ctx;
+ struct net_device *netdev;
+
+ /* Safe, because this is the destroy flow, refcount is 0, so
+ * tls_device_down can't store this field in parallel.
+ */
+ netdev = rcu_dereference_protected(ctx->netdev,
+ !refcount_read(&ctx->refcount));
+
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ dev_put(netdev);
+ ctx->netdev = NULL;
+ tls_device_free_ctx(ctx);
+}
+
+static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
+{
+ struct net_device *netdev;
+ unsigned long flags;
+ bool async_cleanup;
+
+ spin_lock_irqsave(&tls_device_lock, flags);
+ if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+ return;
+ }
+
+ list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
+
+ /* Safe, because this is the destroy flow, refcount is 0, so
+ * tls_device_down can't store this field in parallel.
+ */
+ netdev = rcu_dereference_protected(ctx->netdev,
+ !refcount_read(&ctx->refcount));
+
+ async_cleanup = netdev && ctx->tx_conf == TLS_HW;
+ if (async_cleanup) {
+ struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
+
+ /* queue_work inside the spinlock
+ * to make sure tls_device_down waits for that work.
+ */
+ queue_work(destruct_wq, &offload_ctx->destruct_work);
+ }
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ if (!async_cleanup)
+ tls_device_free_ctx(ctx);
+}
+
+/* We assume that the socket is already connected */
+static struct net_device *get_netdev_for_sock(struct sock *sk)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+ struct net_device *netdev = NULL;
+
+ if (likely(dst)) {
+ netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
+ dev_hold(netdev);
+ }
+
+ dst_release(dst);
+
+ return netdev;
+}
+
+static void destroy_record(struct tls_record_info *record)
+{
+ int i;
+
+ for (i = 0; i < record->num_frags; i++)
+ __skb_frag_unref(&record->frags[i], false);
+ kfree(record);
+}
+
+static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
+{
+ struct tls_record_info *info, *temp;
+
+ list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
+ list_del(&info->list);
+ destroy_record(info);
+ }
+
+ offload_ctx->retransmit_hint = NULL;
+}
+
+static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_record_info *info, *temp;
+ struct tls_offload_context_tx *ctx;
+ u64 deleted_records = 0;
+ unsigned long flags;
+
+ if (!tls_ctx)
+ return;
+
+ ctx = tls_offload_ctx_tx(tls_ctx);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ info = ctx->retransmit_hint;
+ if (info && !before(acked_seq, info->end_seq))
+ ctx->retransmit_hint = NULL;
+
+ list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
+ if (before(acked_seq, info->end_seq))
+ break;
+ list_del(&info->list);
+
+ destroy_record(info);
+ deleted_records++;
+ }
+
+ ctx->unacked_record_sn += deleted_records;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+}
+
+/* At this point, there should be no references on this
+ * socket and no in-flight SKBs associated with this
+ * socket, so it is safe to free all the resources.
+ */
+void tls_device_sk_destruct(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+
+ tls_ctx->sk_destruct(sk);
+
+ if (tls_ctx->tx_conf == TLS_HW) {
+ if (ctx->open_record)
+ destroy_record(ctx->open_record);
+ delete_all_records(ctx);
+ crypto_free_aead(ctx->aead_send);
+ clean_acked_data_disable(inet_csk(sk));
+ }
+
+ tls_device_queue_ctx_destruction(tls_ctx);
+}
+EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
+
+void tls_device_free_resources_tx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_free_partial_record(sk, tls_ctx);
+}
+
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+}
+EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
+
+static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
+ u32 seq)
+{
+ struct net_device *netdev;
+ struct sk_buff *skb;
+ int err = 0;
+ u8 *rcd_sn;
+
+ skb = tcp_write_queue_tail(sk);
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+
+ rcd_sn = tls_ctx->tx.rec_seq;
+
+ trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
+ down_read(&device_offload_lock);
+ netdev = rcu_dereference_protected(tls_ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
+ if (netdev)
+ err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+ rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_TX);
+ up_read(&device_offload_lock);
+ if (err)
+ return;
+
+ clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+}
+
+static void tls_append_frag(struct tls_record_info *record,
+ struct page_frag *pfrag,
+ int size)
+{
+ skb_frag_t *frag;
+
+ frag = &record->frags[record->num_frags - 1];
+ if (skb_frag_page(frag) == pfrag->page &&
+ skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
+ skb_frag_size_add(frag, size);
+ } else {
+ ++frag;
+ __skb_frag_set_page(frag, pfrag->page);
+ skb_frag_off_set(frag, pfrag->offset);
+ skb_frag_size_set(frag, size);
+ ++record->num_frags;
+ get_page(pfrag->page);
+ }
+
+ pfrag->offset += size;
+ record->len += size;
+}
+
+static int tls_push_record(struct sock *sk,
+ struct tls_context *ctx,
+ struct tls_offload_context_tx *offload_ctx,
+ struct tls_record_info *record,
+ int flags)
+{
+ struct tls_prot_info *prot = &ctx->prot_info;
+ struct tcp_sock *tp = tcp_sk(sk);
+ skb_frag_t *frag;
+ int i;
+
+ record->end_seq = tp->write_seq + record->len;
+ list_add_tail_rcu(&record->list, &offload_ctx->records_list);
+ offload_ctx->open_record = NULL;
+
+ if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
+ tls_device_resync_tx(sk, ctx, tp->write_seq);
+
+ tls_advance_record_sn(sk, prot, &ctx->tx);
+
+ for (i = 0; i < record->num_frags; i++) {
+ frag = &record->frags[i];
+ sg_unmark_end(&offload_ctx->sg_tx_data[i]);
+ sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
+ skb_frag_size(frag), skb_frag_off(frag));
+ sk_mem_charge(sk, skb_frag_size(frag));
+ get_page(skb_frag_page(frag));
+ }
+ sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
+
+ /* all ready, send */
+ return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
+}
+
+static void tls_device_record_close(struct sock *sk,
+ struct tls_context *ctx,
+ struct tls_record_info *record,
+ struct page_frag *pfrag,
+ unsigned char record_type)
+{
+ struct tls_prot_info *prot = &ctx->prot_info;
+ struct page_frag dummy_tag_frag;
+
+ /* append tag
+ * device will fill in the tag, we just need to append a placeholder
+ * use socket memory to improve coalescing (re-using a single buffer
+ * increases frag count)
+ * if we can't allocate memory now use the dummy page
+ */
+ if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
+ !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
+ dummy_tag_frag.page = dummy_page;
+ dummy_tag_frag.offset = 0;
+ pfrag = &dummy_tag_frag;
+ }
+ tls_append_frag(record, pfrag, prot->tag_size);
+
+ /* fill prepend */
+ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
+ record->len - prot->overhead_size,
+ record_type);
+}
+
+static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
+ struct page_frag *pfrag,
+ size_t prepend_size)
+{
+ struct tls_record_info *record;
+ skb_frag_t *frag;
+
+ record = kmalloc(sizeof(*record), GFP_KERNEL);
+ if (!record)
+ return -ENOMEM;
+
+ frag = &record->frags[0];
+ __skb_frag_set_page(frag, pfrag->page);
+ skb_frag_off_set(frag, pfrag->offset);
+ skb_frag_size_set(frag, prepend_size);
+
+ get_page(pfrag->page);
+ pfrag->offset += prepend_size;
+
+ record->num_frags = 1;
+ record->len = prepend_size;
+ offload_ctx->open_record = record;
+ return 0;
+}
+
+static int tls_do_allocation(struct sock *sk,
+ struct tls_offload_context_tx *offload_ctx,
+ struct page_frag *pfrag,
+ size_t prepend_size)
+{
+ int ret;
+
+ if (!offload_ctx->open_record) {
+ if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
+ sk->sk_allocation))) {
+ READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
+ sk_stream_moderate_sndbuf(sk);
+ return -ENOMEM;
+ }
+
+ ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
+ if (ret)
+ return ret;
+
+ if (pfrag->size > pfrag->offset)
+ return 0;
+ }
+
+ if (!sk_page_frag_refill(sk, pfrag))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
+{
+ size_t pre_copy, nocache;
+
+ pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
+ if (pre_copy) {
+ pre_copy = min(pre_copy, bytes);
+ if (copy_from_iter(addr, pre_copy, i) != pre_copy)
+ return -EFAULT;
+ bytes -= pre_copy;
+ addr += pre_copy;
+ }
+
+ nocache = round_down(bytes, SMP_CACHE_BYTES);
+ if (copy_from_iter_nocache(addr, nocache, i) != nocache)
+ return -EFAULT;
+ bytes -= nocache;
+ addr += nocache;
+
+ if (bytes && copy_from_iter(addr, bytes, i) != bytes)
+ return -EFAULT;
+
+ return 0;
+}
+
+union tls_iter_offset {
+ struct iov_iter *msg_iter;
+ int offset;
+};
+
+static int tls_push_data(struct sock *sk,
+ union tls_iter_offset iter_offset,
+ size_t size, int flags,
+ unsigned char record_type,
+ struct page *zc_page)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ struct tls_record_info *record;
+ int tls_push_record_flags;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+ bool more = false;
+ bool done = false;
+ int copy, rc = 0;
+ long timeo;
+
+ if (flags &
+ ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
+ return -EOPNOTSUPP;
+
+ if (unlikely(sk->sk_err))
+ return -sk->sk_err;
+
+ flags |= MSG_SENDPAGE_DECRYPTED;
+ tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ if (tls_is_partially_sent_record(tls_ctx)) {
+ rc = tls_push_partial_record(sk, tls_ctx, flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ pfrag = sk_page_frag(sk);
+
+ /* TLS_HEADER_SIZE is not counted as part of the TLS record, and
+ * we need to leave room for an authentication tag.
+ */
+ max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
+ prot->prepend_size;
+ do {
+ rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
+ if (unlikely(rc)) {
+ rc = sk_stream_wait_memory(sk, &timeo);
+ if (!rc)
+ continue;
+
+ record = ctx->open_record;
+ if (!record)
+ break;
+handle_error:
+ if (record_type != TLS_RECORD_TYPE_DATA) {
+ /* avoid sending partial
+ * record with type !=
+ * application_data
+ */
+ size = orig_size;
+ destroy_record(record);
+ ctx->open_record = NULL;
+ } else if (record->len > prot->prepend_size) {
+ goto last_record;
+ }
+
+ break;
+ }
+
+ record = ctx->open_record;
+
+ copy = min_t(size_t, size, max_open_record_len - record->len);
+ if (copy && zc_page) {
+ struct page_frag zc_pfrag;
+
+ zc_pfrag.page = zc_page;
+ zc_pfrag.offset = iter_offset.offset;
+ zc_pfrag.size = copy;
+ tls_append_frag(record, &zc_pfrag, copy);
+
+ iter_offset.offset += copy;
+ } else if (copy) {
+ copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
+
+ rc = tls_device_copy_data(page_address(pfrag->page) +
+ pfrag->offset, copy,
+ iter_offset.msg_iter);
+ if (rc)
+ goto handle_error;
+ tls_append_frag(record, pfrag, copy);
+ }
+
+ size -= copy;
+ if (!size) {
+last_record:
+ tls_push_record_flags = flags;
+ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+ more = true;
+ break;
+ }
+
+ done = true;
+ }
+
+ if (done || record->len >= max_open_record_len ||
+ (record->num_frags >= MAX_SKB_FRAGS - 1)) {
+ tls_device_record_close(sk, tls_ctx, record,
+ pfrag, record_type);
+
+ rc = tls_push_record(sk,
+ tls_ctx,
+ ctx,
+ record,
+ tls_push_record_flags);
+ if (rc < 0)
+ break;
+ }
+ } while (!done);
+
+ tls_ctx->pending_open_record_frags = more;
+
+ if (orig_size - size > 0)
+ rc = orig_size - size;
+
+ return rc;
+}
+
+int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ union tls_iter_offset iter;
+ int rc;
+
+ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+ if (unlikely(msg->msg_controllen)) {
+ rc = tls_process_cmsg(sk, msg, &record_type);
+ if (rc)
+ goto out;
+ }
+
+ iter.msg_iter = &msg->msg_iter;
+ rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);
+
+out:
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ return rc;
+}
+
+int tls_device_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ union tls_iter_offset iter_offset;
+ struct iov_iter msg_iter;
+ char *kaddr;
+ struct kvec iov;
+ int rc;
+
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+ if (flags & MSG_OOB) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (tls_ctx->zerocopy_sendfile) {
+ iter_offset.offset = offset;
+ rc = tls_push_data(sk, iter_offset, size,
+ flags, TLS_RECORD_TYPE_DATA, page);
+ goto out;
+ }
+
+ kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
+ iov.iov_len = size;
+ iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size);
+ iter_offset.msg_iter = &msg_iter;
+ rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
+ NULL);
+ kunmap(page);
+
+out:
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ return rc;
+}
+
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
+ u32 seq, u64 *p_record_sn)
+{
+ u64 record_sn = context->hint_record_sn;
+ struct tls_record_info *info, *last;
+
+ info = context->retransmit_hint;
+ if (!info ||
+ before(seq, info->end_seq - info->len)) {
+ /* if retransmit_hint is irrelevant start
+ * from the beginning of the list
+ */
+ info = list_first_entry_or_null(&context->records_list,
+ struct tls_record_info, list);
+ if (!info)
+ return NULL;
+ /* send the start_marker record if seq number is before the
+ * tls offload start marker sequence number. This record is
+ * required to handle TCP packets which are before TLS offload
+ * started.
+ * And if it's not start marker, look if this seq number
+ * belongs to the list.
+ */
+ if (likely(!tls_record_is_start_marker(info))) {
+ /* we have the first record, get the last record to see
+ * if this seq number belongs to the list.
+ */
+ last = list_last_entry(&context->records_list,
+ struct tls_record_info, list);
+
+ if (!between(seq, tls_record_start_seq(info),
+ last->end_seq))
+ return NULL;
+ }
+ record_sn = context->unacked_record_sn;
+ }
+
+ /* We just need the _rcu for the READ_ONCE() */
+ rcu_read_lock();
+ list_for_each_entry_from_rcu(info, &context->records_list, list) {
+ if (before(seq, info->end_seq)) {
+ if (!context->retransmit_hint ||
+ after(info->end_seq,
+ context->retransmit_hint->end_seq)) {
+ context->hint_record_sn = record_sn;
+ context->retransmit_hint = info;
+ }
+ *p_record_sn = record_sn;
+ goto exit_rcu_unlock;
+ }
+ record_sn++;
+ }
+ info = NULL;
+
+exit_rcu_unlock:
+ rcu_read_unlock();
+ return info;
+}
+EXPORT_SYMBOL(tls_get_record);
+
+static int tls_device_push_pending_record(struct sock *sk, int flags)
+{
+ union tls_iter_offset iter;
+ struct iov_iter msg_iter;
+
+ iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0);
+ iter.msg_iter = &msg_iter;
+ return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
+}
+
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+{
+ if (tls_is_partially_sent_record(ctx)) {
+ gfp_t sk_allocation = sk->sk_allocation;
+
+ WARN_ON_ONCE(sk->sk_write_pending);
+
+ sk->sk_allocation = GFP_ATOMIC;
+ tls_push_partial_record(sk, ctx,
+ MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_DECRYPTED);
+ sk->sk_allocation = sk_allocation;
+ }
+}
+
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ struct sock *sk, u32 seq, u8 *rcd_sn)
+{
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ struct net_device *netdev;
+
+ trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+ rcu_read_lock();
+ netdev = rcu_dereference(tls_ctx->netdev);
+ if (netdev)
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_RX);
+ rcu_read_unlock();
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
+}
+
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+ s64 resync_req, u32 *seq, u16 *rcd_delta)
+{
+ u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+ u32 req_seq = resync_req >> 32;
+ u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+ u16 i;
+
+ *rcd_delta = 0;
+
+ if (is_async) {
+ /* shouldn't get to wraparound:
+ * too long in async stage, something bad happened
+ */
+ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+ return false;
+
+ /* asynchronous stage: log all headers seq such that
+ * req_seq <= seq <= end_seq, and wait for real resync request
+ */
+ if (before(*seq, req_seq))
+ return false;
+ if (!after(*seq, req_end) &&
+ resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+ resync_async->log[resync_async->loglen++] = *seq;
+
+ resync_async->rcd_delta++;
+
+ return false;
+ }
+
+ /* synchronous stage: check against the logged entries and
+ * proceed to check the next entries if no match was found
+ */
+ for (i = 0; i < resync_async->loglen; i++)
+ if (req_seq == resync_async->log[i] &&
+ atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
+ *rcd_delta = resync_async->rcd_delta - i;
+ *seq = req_seq;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
+ return true;
+ }
+
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
+
+ if (req_seq == *seq &&
+ atomic64_try_cmpxchg(&resync_async->req,
+ &resync_req, 0))
+ return true;
+
+ return false;
+}
+
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ u32 sock_data, is_req_pending;
+ struct tls_prot_info *prot;
+ s64 resync_req;
+ u16 rcd_delta;
+ u32 req_seq;
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return;
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+ return;
+
+ prot = &tls_ctx->prot_info;
+ rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+
+ switch (rx_ctx->resync_type) {
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
+ resync_req = atomic64_read(&rx_ctx->resync_req);
+ req_seq = resync_req >> 32;
+ seq += TLS_HEADER_SIZE - 1;
+ is_req_pending = resync_req;
+
+ if (likely(!is_req_pending) || req_seq != seq ||
+ !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+ return;
+ break;
+ case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
+ if (likely(!rx_ctx->resync_nh_do_now))
+ return;
+
+ /* head of next rec is already in, note that the sock_inq will
+ * include the currently parsed message when called from parser
+ */
+ sock_data = tcp_inq(sk);
+ if (sock_data > rcd_len) {
+ trace_tls_device_rx_resync_nh_delay(sk, sock_data,
+ rcd_len);
+ return;
+ }
+
+ rx_ctx->resync_nh_do_now = 0;
+ seq += rcd_len;
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+ break;
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+ resync_req = atomic64_read(&rx_ctx->resync_async->req);
+ is_req_pending = resync_req;
+ if (likely(!is_req_pending))
+ return;
+
+ if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+ resync_req, &seq, &rcd_delta))
+ return;
+ tls_bigint_subtract(rcd_sn, rcd_delta);
+ break;
+ }
+
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+}
+
+static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
+ struct tls_offload_context_rx *ctx,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct strp_msg *rxm;
+
+ /* device will request resyncs by itself based on stream scan */
+ if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
+ return;
+ /* already scheduled */
+ if (ctx->resync_nh_do_now)
+ return;
+ /* seen decrypted fragments since last fully-failed record */
+ if (ctx->resync_nh_reset) {
+ ctx->resync_nh_reset = 0;
+ ctx->resync_nh.decrypted_failed = 1;
+ ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
+ return;
+ }
+
+ if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
+ return;
+
+ /* doing resync, bump the next target in case it fails */
+ if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
+ ctx->resync_nh.decrypted_tgt *= 2;
+ else
+ ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
+
+ rxm = strp_msg(skb);
+
+ /* head of next rec is already in, parser will sync for us */
+ if (tcp_inq(sk) > rxm->full_len) {
+ trace_tls_device_rx_resync_nh_schedule(sk);
+ ctx->resync_nh_do_now = 1;
+ } else {
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+
+ tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
+ rcd_sn);
+ }
+}
+
+static int
+tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
+ const struct tls_cipher_size_desc *cipher_sz;
+ int err, offset, copy, data_len, pos;
+ struct sk_buff *skb, *skb_iter;
+ struct scatterlist sg[1];
+ struct strp_msg *rxm;
+ char *orig_buf, *buf;
+
+ switch (tls_ctx->crypto_recv.info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ case TLS_CIPHER_AES_GCM_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+ cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
+
+ rxm = strp_msg(tls_strp_msg(sw_ctx));
+ orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
+ sk->sk_allocation);
+ if (!orig_buf)
+ return -ENOMEM;
+ buf = orig_buf;
+
+ err = tls_strp_msg_cow(sw_ctx);
+ if (unlikely(err))
+ goto free_buf;
+
+ skb = tls_strp_msg(sw_ctx);
+ rxm = strp_msg(skb);
+ offset = rxm->offset;
+
+ sg_init_table(sg, 1);
+ sg_set_buf(&sg[0], buf,
+ rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
+ err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
+ if (err)
+ goto free_buf;
+
+ /* We are interested only in the decrypted data not the auth */
+ err = decrypt_skb(sk, sg);
+ if (err != -EBADMSG)
+ goto free_buf;
+ else
+ err = 0;
+
+ data_len = rxm->full_len - cipher_sz->tag;
+
+ if (skb_pagelen(skb) > offset) {
+ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
+
+ if (skb->decrypted) {
+ err = skb_store_bits(skb, offset, buf, copy);
+ if (err)
+ goto free_buf;
+ }
+
+ offset += copy;
+ buf += copy;
+ }
+
+ pos = skb_pagelen(skb);
+ skb_walk_frags(skb, skb_iter) {
+ int frag_pos;
+
+ /* Practically all frags must belong to msg if reencrypt
+ * is needed with current strparser and coalescing logic,
+ * but strparser may "get optimized", so let's be safe.
+ */
+ if (pos + skb_iter->len <= offset)
+ goto done_with_frag;
+ if (pos >= data_len + rxm->offset)
+ break;
+
+ frag_pos = offset - pos;
+ copy = min_t(int, skb_iter->len - frag_pos,
+ data_len + rxm->offset - offset);
+
+ if (skb_iter->decrypted) {
+ err = skb_store_bits(skb_iter, frag_pos, buf, copy);
+ if (err)
+ goto free_buf;
+ }
+
+ offset += copy;
+ buf += copy;
+done_with_frag:
+ pos += skb_iter->len;
+ }
+
+free_buf:
+ kfree(orig_buf);
+ return err;
+}
+
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
+{
+ struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
+ struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
+ struct sk_buff *skb = tls_strp_msg(sw_ctx);
+ struct strp_msg *rxm = strp_msg(skb);
+ int is_decrypted, is_encrypted;
+
+ if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
+ is_decrypted = skb->decrypted;
+ is_encrypted = !is_decrypted;
+ } else {
+ is_decrypted = 0;
+ is_encrypted = 0;
+ }
+
+ trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
+ tls_ctx->rx.rec_seq, rxm->full_len,
+ is_encrypted, is_decrypted);
+
+ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+ if (likely(is_encrypted || is_decrypted))
+ return is_decrypted;
+
+ /* After tls_device_down disables the offload, the next SKB will
+ * likely have initial fragments decrypted, and final ones not
+ * decrypted. We need to reencrypt that single SKB.
+ */
+ return tls_device_reencrypt(sk, tls_ctx);
+ }
+
+ /* Return immediately if the record is either entirely plaintext or
+ * entirely ciphertext. Otherwise reencrypt the partially decrypted
+ * record.
+ */
+ if (is_decrypted) {
+ ctx->resync_nh_reset = 1;
+ return is_decrypted;
+ }
+ if (is_encrypted) {
+ tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
+ return 0;
+ }
+
+ ctx->resync_nh_reset = 1;
+ return tls_device_reencrypt(sk, tls_ctx);
+}
+
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+ struct net_device *netdev)
+{
+ if (sk->sk_destruct != tls_device_sk_destruct) {
+ refcount_set(&ctx->refcount, 1);
+ dev_hold(netdev);
+ RCU_INIT_POINTER(ctx->netdev, netdev);
+ spin_lock_irq(&tls_device_lock);
+ list_add_tail(&ctx->list, &tls_device_list);
+ spin_unlock_irq(&tls_device_lock);
+
+ ctx->sk_destruct = sk->sk_destruct;
+ smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
+ }
+}
+
+int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ const struct tls_cipher_size_desc *cipher_sz;
+ struct tls_record_info *start_marker_record;
+ struct tls_offload_context_tx *offload_ctx;
+ struct tls_crypto_info *crypto_info;
+ struct net_device *netdev;
+ char *iv, *rec_seq;
+ struct sk_buff *skb;
+ __be64 rcd_sn;
+ int rc;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (ctx->priv_ctx_tx)
+ return -EEXIST;
+
+ netdev = get_netdev_for_sock(sk);
+ if (!netdev) {
+ pr_err_ratelimited("%s: netdev not found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
+ rc = -EOPNOTSUPP;
+ goto release_netdev;
+ }
+
+ crypto_info = &ctx->crypto_send.info;
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EOPNOTSUPP;
+ goto release_netdev;
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
+ rec_seq =
+ ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
+ rec_seq =
+ ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
+ break;
+ default:
+ rc = -EINVAL;
+ goto release_netdev;
+ }
+ cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
+
+ /* Sanity-check the rec_seq_size for stack allocations */
+ if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
+ rc = -EINVAL;
+ goto release_netdev;
+ }
+
+ prot->version = crypto_info->version;
+ prot->cipher_type = crypto_info->cipher_type;
+ prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
+ prot->tag_size = cipher_sz->tag;
+ prot->overhead_size = prot->prepend_size + prot->tag_size;
+ prot->iv_size = cipher_sz->iv;
+ prot->salt_size = cipher_sz->salt;
+ ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
+ if (!ctx->tx.iv) {
+ rc = -ENOMEM;
+ goto release_netdev;
+ }
+
+ memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
+
+ prot->rec_seq_size = cipher_sz->rec_seq;
+ ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
+ if (!ctx->tx.rec_seq) {
+ rc = -ENOMEM;
+ goto free_iv;
+ }
+
+ start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
+ if (!start_marker_record) {
+ rc = -ENOMEM;
+ goto free_rec_seq;
+ }
+
+ offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
+ if (!offload_ctx) {
+ rc = -ENOMEM;
+ goto free_marker_record;
+ }
+
+ rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
+ if (rc)
+ goto free_offload_ctx;
+
+ /* start at rec_seq - 1 to account for the start marker record */
+ memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
+ offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
+
+ start_marker_record->end_seq = tcp_sk(sk)->write_seq;
+ start_marker_record->len = 0;
+ start_marker_record->num_frags = 0;
+
+ INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
+ offload_ctx->ctx = ctx;
+
+ INIT_LIST_HEAD(&offload_ctx->records_list);
+ list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
+ spin_lock_init(&offload_ctx->lock);
+ sg_init_table(offload_ctx->sg_tx_data,
+ ARRAY_SIZE(offload_ctx->sg_tx_data));
+
+ clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
+ ctx->push_pending_record = tls_device_push_pending_record;
+
+ /* TLS offload is greatly simplified if we don't send
+ * SKBs where only part of the payload needs to be encrypted.
+ * So mark the last skb in the write queue as end of record.
+ */
+ skb = tcp_write_queue_tail(sk);
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+
+ /* Avoid offloading if the device is down
+ * We don't want to offload new flows after
+ * the NETDEV_DOWN event
+ *
+ * device_offload_lock is taken in tls_device's NETDEV_DOWN
+ * handler thus protecting from the device going down before
+ * ctx was added to tls_device_list.
+ */
+ down_read(&device_offload_lock);
+ if (!(netdev->flags & IFF_UP)) {
+ rc = -EINVAL;
+ goto release_lock;
+ }
+
+ ctx->priv_ctx_tx = offload_ctx;
+ rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
+ &ctx->crypto_send.info,
+ tcp_sk(sk)->write_seq);
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
+ tcp_sk(sk)->write_seq, rec_seq, rc);
+ if (rc)
+ goto release_lock;
+
+ tls_device_attach(ctx, sk, netdev);
+ up_read(&device_offload_lock);
+
+ /* following this assignment tls_is_sk_tx_device_offloaded
+ * will return true and the context might be accessed
+ * by the netdev's xmit function.
+ */
+ smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
+ dev_put(netdev);
+
+ return 0;
+
+release_lock:
+ up_read(&device_offload_lock);
+ clean_acked_data_disable(inet_csk(sk));
+ crypto_free_aead(offload_ctx->aead_send);
+free_offload_ctx:
+ kfree(offload_ctx);
+ ctx->priv_ctx_tx = NULL;
+free_marker_record:
+ kfree(start_marker_record);
+free_rec_seq:
+ kfree(ctx->tx.rec_seq);
+free_iv:
+ kfree(ctx->tx.iv);
+release_netdev:
+ dev_put(netdev);
+ return rc;
+}
+
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+ struct tls12_crypto_info_aes_gcm_128 *info;
+ struct tls_offload_context_rx *context;
+ struct net_device *netdev;
+ int rc = 0;
+
+ if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
+ return -EOPNOTSUPP;
+
+ netdev = get_netdev_for_sock(sk);
+ if (!netdev) {
+ pr_err_ratelimited("%s: netdev not found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+ rc = -EOPNOTSUPP;
+ goto release_netdev;
+ }
+
+ /* Avoid offloading if the device is down
+ * We don't want to offload new flows after
+ * the NETDEV_DOWN event
+ *
+ * device_offload_lock is taken in tls_device's NETDEV_DOWN
+ * handler thus protecting from the device going down before
+ * ctx was added to tls_device_list.
+ */
+ down_read(&device_offload_lock);
+ if (!(netdev->flags & IFF_UP)) {
+ rc = -EINVAL;
+ goto release_lock;
+ }
+
+ context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
+ if (!context) {
+ rc = -ENOMEM;
+ goto release_lock;
+ }
+ context->resync_nh_reset = 1;
+
+ ctx->priv_ctx_rx = context;
+ rc = tls_set_sw_offload(sk, ctx, 0);
+ if (rc)
+ goto release_ctx;
+
+ rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
+ &ctx->crypto_recv.info,
+ tcp_sk(sk)->copied_seq);
+ info = (void *)&ctx->crypto_recv.info;
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
+ tcp_sk(sk)->copied_seq, info->rec_seq, rc);
+ if (rc)
+ goto free_sw_resources;
+
+ tls_device_attach(ctx, sk, netdev);
+ up_read(&device_offload_lock);
+
+ dev_put(netdev);
+
+ return 0;
+
+free_sw_resources:
+ up_read(&device_offload_lock);
+ tls_sw_free_resources_rx(sk);
+ down_read(&device_offload_lock);
+release_ctx:
+ ctx->priv_ctx_rx = NULL;
+release_lock:
+ up_read(&device_offload_lock);
+release_netdev:
+ dev_put(netdev);
+ return rc;
+}
+
+void tls_device_offload_cleanup_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct net_device *netdev;
+
+ down_read(&device_offload_lock);
+ netdev = rcu_dereference_protected(tls_ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
+ if (!netdev)
+ goto out;
+
+ netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
+
+ if (tls_ctx->tx_conf != TLS_HW) {
+ dev_put(netdev);
+ rcu_assign_pointer(tls_ctx->netdev, NULL);
+ } else {
+ set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
+ }
+out:
+ up_read(&device_offload_lock);
+ tls_sw_release_resources_rx(sk);
+}
+
+static int tls_device_down(struct net_device *netdev)
+{
+ struct tls_context *ctx, *tmp;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ /* Request a write lock to block new offload attempts */
+ down_write(&device_offload_lock);
+
+ spin_lock_irqsave(&tls_device_lock, flags);
+ list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
+ struct net_device *ctx_netdev =
+ rcu_dereference_protected(ctx->netdev,
+ lockdep_is_held(&device_offload_lock));
+
+ if (ctx_netdev != netdev ||
+ !refcount_inc_not_zero(&ctx->refcount))
+ continue;
+
+ list_move(&ctx->list, &list);
+ }
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ list_for_each_entry_safe(ctx, tmp, &list, list) {
+ /* Stop offloaded TX and switch to the fallback.
+ * tls_is_sk_tx_device_offloaded will return false.
+ */
+ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+ /* Stop the RX and TX resync.
+ * tls_dev_resync must not be called after tls_dev_del.
+ */
+ rcu_assign_pointer(ctx->netdev, NULL);
+
+ /* Start skipping the RX resync logic completely. */
+ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+ /* Sync with inflight packets. After this point:
+ * TX: no non-encrypted packets will be passed to the driver.
+ * RX: resync requests from the driver will be ignored.
+ */
+ synchronize_net();
+
+ /* Release the offload context on the driver side. */
+ if (ctx->tx_conf == TLS_HW)
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_TX);
+ if (ctx->rx_conf == TLS_HW &&
+ !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
+ netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ TLS_OFFLOAD_CTX_DIR_RX);
+
+ dev_put(netdev);
+
+ /* Move the context to a separate list for two reasons:
+ * 1. When the context is deallocated, list_del is called.
+ * 2. It's no longer an offloaded context, so we don't want to
+ * run offload-specific code on this context.
+ */
+ spin_lock_irqsave(&tls_device_lock, flags);
+ list_move_tail(&ctx->list, &tls_device_down_list);
+ spin_unlock_irqrestore(&tls_device_lock, flags);
+
+ /* Device contexts for RX and TX will be freed on sk_destruct
+ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+ * Now release the ref taken above.
+ */
+ if (refcount_dec_and_test(&ctx->refcount)) {
+ /* sk_destruct ran after tls_device_down took a ref, and
+ * it returned early. Complete the destruction here.
+ */
+ list_del(&ctx->list);
+ tls_device_free_ctx(ctx);
+ }
+ }
+
+ up_write(&device_offload_lock);
+
+ flush_workqueue(destruct_wq);
+
+ return NOTIFY_DONE;
+}
+
+static int tls_dev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (!dev->tlsdev_ops &&
+ !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_FEAT_CHANGE:
+ if (netif_is_bond_master(dev))
+ return NOTIFY_DONE;
+ if ((dev->features & NETIF_F_HW_TLS_RX) &&
+ !dev->tlsdev_ops->tls_dev_resync)
+ return NOTIFY_BAD;
+
+ if (dev->tlsdev_ops &&
+ dev->tlsdev_ops->tls_dev_add &&
+ dev->tlsdev_ops->tls_dev_del)
+ return NOTIFY_DONE;
+ else
+ return NOTIFY_BAD;
+ case NETDEV_DOWN:
+ return tls_device_down(dev);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tls_dev_notifier = {
+ .notifier_call = tls_dev_event,
+};
+
+int __init tls_device_init(void)
+{
+ int err;
+
+ dummy_page = alloc_page(GFP_KERNEL);
+ if (!dummy_page)
+ return -ENOMEM;
+
+ destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+ if (!destruct_wq) {
+ err = -ENOMEM;
+ goto err_free_dummy;
+ }
+
+ err = register_netdevice_notifier(&tls_dev_notifier);
+ if (err)
+ goto err_destroy_wq;
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(destruct_wq);
+err_free_dummy:
+ put_page(dummy_page);
+ return err;
+}
+
+void __exit tls_device_cleanup(void)
+{
+ unregister_netdevice_notifier(&tls_dev_notifier);
+ destroy_workqueue(destruct_wq);
+ clean_acked_data_flush();
+ put_page(dummy_page);
+}
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
new file mode 100644
index 000000000..7fbb1d0b6
--- /dev/null
+++ b/net/tls/tls_device_fallback.c
@@ -0,0 +1,513 @@
+/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tls.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <net/ip6_checksum.h>
+
+#include "tls.h"
+
+static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
+{
+ struct scatterlist *src = walk->sg;
+ int diff = walk->offset - src->offset;
+
+ sg_set_page(sg, sg_page(src),
+ src->length - diff, walk->offset);
+
+ scatterwalk_crypto_chain(sg, sg_next(src), 2);
+}
+
+static int tls_enc_record(struct aead_request *aead_req,
+ struct crypto_aead *aead, char *aad,
+ char *iv, __be64 rcd_sn,
+ struct scatter_walk *in,
+ struct scatter_walk *out, int *in_len,
+ struct tls_prot_info *prot)
+{
+ unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
+ const struct tls_cipher_size_desc *cipher_sz;
+ struct scatterlist sg_in[3];
+ struct scatterlist sg_out[3];
+ unsigned int buf_size;
+ u16 len;
+ int rc;
+
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ case TLS_CIPHER_AES_GCM_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+ cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
+
+ buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
+ len = min_t(int, *in_len, buf_size);
+
+ scatterwalk_copychunks(buf, in, len, 0);
+ scatterwalk_copychunks(buf, out, len, 1);
+
+ *in_len -= len;
+ if (!*in_len)
+ return 0;
+
+ scatterwalk_pagedone(in, 0, 1);
+ scatterwalk_pagedone(out, 1, 1);
+
+ len = buf[4] | (buf[3] << 8);
+ len -= cipher_sz->iv;
+
+ tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
+
+ memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
+
+ sg_init_table(sg_in, ARRAY_SIZE(sg_in));
+ sg_init_table(sg_out, ARRAY_SIZE(sg_out));
+ sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
+ sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
+ chain_to_walk(sg_in + 1, in);
+ chain_to_walk(sg_out + 1, out);
+
+ *in_len -= len;
+ if (*in_len < 0) {
+ *in_len += cipher_sz->tag;
+		/* The input buffer doesn't contain the entire record;
+		 * trim len accordingly. The resulting authentication tag
+		 * will contain garbage, but we don't care, so we won't
+		 * include any of it in the output skb.
+		 * Note that we assume the output buffer length
+		 * is larger than the input buffer length + tag size.
+		 */
+ if (*in_len < 0)
+ len += *in_len;
+
+ *in_len = 0;
+ }
+
+ if (*in_len) {
+ scatterwalk_copychunks(NULL, in, len, 2);
+ scatterwalk_pagedone(in, 0, 1);
+ scatterwalk_copychunks(NULL, out, len, 2);
+ scatterwalk_pagedone(out, 1, 1);
+ }
+
+ len -= cipher_sz->tag;
+ aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
+
+ rc = crypto_aead_encrypt(aead_req);
+
+ return rc;
+}
+
+static void tls_init_aead_request(struct aead_request *aead_req,
+ struct crypto_aead *aead)
+{
+ aead_request_set_tfm(aead_req, aead);
+ aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
+}
+
+static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
+ gfp_t flags)
+{
+ unsigned int req_size = sizeof(struct aead_request) +
+ crypto_aead_reqsize(aead);
+ struct aead_request *aead_req;
+
+ aead_req = kzalloc(req_size, flags);
+ if (aead_req)
+ tls_init_aead_request(aead_req, aead);
+ return aead_req;
+}
+
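+/* Walk sg_in/sg_out and encrypt one TLS record at a time until @len bytes
+ * of input have been consumed. @rcd_sn is the record sequence number of
+ * the first record and is incremented for every record processed.
+ */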
+static int tls_enc_records(struct aead_request *aead_req,
+ struct crypto_aead *aead, struct scatterlist *sg_in,
+ struct scatterlist *sg_out, char *aad, char *iv,
+ u64 rcd_sn, int len, struct tls_prot_info *prot)
+{
+ struct scatter_walk out, in;
+ int rc;
+
+ scatterwalk_start(&in, sg_in);
+ scatterwalk_start(&out, sg_out);
+
+ do {
+ rc = tls_enc_record(aead_req, aead, aad, iv,
+ cpu_to_be64(rcd_sn), &in, &out, &len, prot);
+ rcd_sn++;
+
+ } while (rc == 0 && len);
+
+ scatterwalk_done(&in, 0, 0);
+ scatterwalk_done(&out, 1, 0);
+
+ return rc;
+}
+
+/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
+ * might have been changed by NAT.
+ */
+static void update_chksum(struct sk_buff *skb, int headln)
+{
+ struct tcphdr *th = tcp_hdr(skb);
+ int datalen = skb->len - headln;
+ const struct ipv6hdr *ipv6h;
+ const struct iphdr *iph;
+
+	/* We only changed the payload, so if the skb already uses
+	 * CHECKSUM_PARTIAL we don't need to update anything.
+	 */
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ return;
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (skb->sk->sk_family == AF_INET6) {
+ ipv6h = ipv6_hdr(skb);
+ th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ datalen, IPPROTO_TCP, 0);
+ } else {
+ iph = ip_hdr(skb);
+ th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
+ IPPROTO_TCP, 0);
+ }
+}
+
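+/* Finish the re-encrypted skb: copy the headers of the original, move
+ * socket ownership (sk and destructor) over, fix up the checksum and
+ * account any truesize difference against sk_wmem_alloc.
+ */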
+static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
+{
+ struct sock *sk = skb->sk;
+ int delta;
+
+ skb_copy_header(nskb, skb);
+
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, headln);
+
+ nskb->destructor = skb->destructor;
+ nskb->sk = sk;
+ skb->destructor = NULL;
+ skb->sk = NULL;
+
+ update_chksum(nskb, headln);
+
+	/* sock_efree means the skb must have gone through skb_orphan_partial() */
+ if (nskb->destructor == sock_efree)
+ return;
+
+ delta = nskb->truesize - skb->truesize;
+ if (likely(delta < 0))
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+ else if (delta)
+ refcount_add(delta, &sk->sk_wmem_alloc);
+}
+
+/* This function may be called after the user socket is already
+ * closed, so make sure we don't use anything freed during
+ * tls_sk_proto_close here.
+ */
+
+static int fill_sg_in(struct scatterlist *sg_in,
+ struct sk_buff *skb,
+ struct tls_offload_context_tx *ctx,
+ u64 *rcd_sn,
+ s32 *sync_size,
+ int *resync_sgs)
+{
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
+ int payload_len = skb->len - tcp_payload_offset;
+ u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct tls_record_info *record;
+ unsigned long flags;
+ int remaining;
+ int i;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ record = tls_get_record(ctx, tcp_seq, rcd_sn);
+ if (!record) {
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -EINVAL;
+ }
+
+ *sync_size = tcp_seq - tls_record_start_seq(record);
+ if (*sync_size < 0) {
+ int is_start_marker = tls_record_is_start_marker(record);
+
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ /* This should only occur if the relevant record was
+ * already acked. In that case it should be ok
+ * to drop the packet and avoid retransmission.
+ *
+ * There is a corner case where the packet contains
+ * both an acked and a non-acked record.
+ * We currently don't handle that case and rely
+		 * on TCP to retransmit a packet that doesn't contain
+ * already acked payload.
+ */
+ if (!is_start_marker)
+ *sync_size = 0;
+ return -EINVAL;
+ }
+
+ remaining = *sync_size;
+ for (i = 0; remaining > 0; i++) {
+ skb_frag_t *frag = &record->frags[i];
+
+ __skb_frag_ref(frag);
+ sg_set_page(sg_in + i, skb_frag_page(frag),
+ skb_frag_size(frag), skb_frag_off(frag));
+
+ remaining -= skb_frag_size(frag);
+
+ if (remaining < 0)
+ sg_in[i].length += remaining;
+ }
+ *resync_sgs = i;
+
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
+ struct tls_context *tls_ctx,
+ struct sk_buff *nskb,
+ int tcp_payload_offset,
+ int payload_len,
+ int sync_size,
+ void *dummy_buf)
+{
+ const struct tls_cipher_size_desc *cipher_sz =
+ &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+
+ sg_set_buf(&sg_out[0], dummy_buf, sync_size);
+ sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
+ /* Add room for authentication tag produced by crypto */
+ dummy_buf += sync_size;
+ sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
+}
+
+static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
+ struct scatterlist sg_out[3],
+ struct scatterlist *sg_in,
+ struct sk_buff *skb,
+ s32 sync_size, u64 rcd_sn)
+{
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
+ int payload_len = skb->len - tcp_payload_offset;
+ const struct tls_cipher_size_desc *cipher_sz;
+ void *buf, *iv, *aad, *dummy_buf, *salt;
+ struct aead_request *aead_req;
+ struct sk_buff *nskb = NULL;
+ int buf_len;
+
+ aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
+ if (!aead_req)
+ return NULL;
+
+ switch (tls_ctx->crypto_send.info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ salt = tls_ctx->crypto_send.aes_gcm_128.salt;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ salt = tls_ctx->crypto_send.aes_gcm_256.salt;
+ break;
+ default:
+ goto free_req;
+ }
+ cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+ buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
+ sync_size + cipher_sz->tag;
+ buf = kmalloc(buf_len, GFP_ATOMIC);
+ if (!buf)
+ goto free_req;
+
+ iv = buf;
+ memcpy(iv, salt, cipher_sz->salt);
+ aad = buf + cipher_sz->salt + cipher_sz->iv;
+ dummy_buf = aad + TLS_AAD_SPACE_SIZE;
+
+ nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
+ if (!nskb)
+ goto free_buf;
+
+ skb_reserve(nskb, skb_headroom(skb));
+
+ fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
+ payload_len, sync_size, dummy_buf);
+
+ if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
+ rcd_sn, sync_size + payload_len,
+ &tls_ctx->prot_info) < 0)
+ goto free_nskb;
+
+ complete_skb(nskb, skb, tcp_payload_offset);
+
+ /* validate_xmit_skb_list assumes that if the skb wasn't segmented
+ * nskb->prev will point to the skb itself
+ */
+ nskb->prev = nskb;
+
+free_buf:
+ kfree(buf);
+free_req:
+ kfree(aead_req);
+ return nskb;
+free_nskb:
+ kfree_skb(nskb);
+ nskb = NULL;
+ goto free_buf;
+}
+
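+/* Software fallback for a TX skb the device cannot encrypt (for example
+ * a retransmission or a packet re-routed away from the offloading device):
+ * re-encrypt the covered records in software and return a new skb.
+ */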
+static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
+{
+ int tcp_payload_offset = skb_tcp_all_headers(skb);
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+ int payload_len = skb->len - tcp_payload_offset;
+ struct scatterlist *sg_in, sg_out[3];
+ struct sk_buff *nskb = NULL;
+ int sg_in_max_elements;
+ int resync_sgs = 0;
+ s32 sync_size = 0;
+ u64 rcd_sn;
+
+ /* worst case is:
+ * MAX_SKB_FRAGS in tls_record_info
+ * MAX_SKB_FRAGS + 1 in SKB head and frags.
+ */
+ sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
+
+ if (!payload_len)
+ return skb;
+
+ sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
+ if (!sg_in)
+ goto free_orig;
+
+ sg_init_table(sg_in, sg_in_max_elements);
+ sg_init_table(sg_out, ARRAY_SIZE(sg_out));
+
+ if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
+		/* bypass packets whose payload predates the kernel TLS
+		 * socket option being set
+		 */
+ if (sync_size < 0 && payload_len <= -sync_size)
+ nskb = skb_get(skb);
+ goto put_sg;
+ }
+
+ nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
+
+put_sg:
+ while (resync_sgs)
+ put_page(sg_page(&sg_in[--resync_sgs]));
+ kfree(sg_in);
+free_orig:
+ if (nskb)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+ return nskb;
+}
+
+struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
+ netif_is_bond_master(dev))
+ return skb;
+
+ return tls_sw_fallback(sk, skb);
+}
+EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return tls_sw_fallback(sk, skb);
+}
+
+struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
+{
+ return tls_sw_fallback(skb->sk, skb);
+}
+EXPORT_SYMBOL_GPL(tls_encrypt_skb);
+
+int tls_sw_fallback_init(struct sock *sk,
+ struct tls_offload_context_tx *offload_ctx,
+ struct tls_crypto_info *crypto_info)
+{
+ const struct tls_cipher_size_desc *cipher_sz;
+ const u8 *key;
+ int rc;
+
+ offload_ctx->aead_send =
+ crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(offload_ctx->aead_send)) {
+ rc = PTR_ERR(offload_ctx->aead_send);
+ pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
+ offload_ctx->aead_send = NULL;
+ goto err_out;
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
+ break;
+ default:
+ rc = -EINVAL;
+ goto free_aead;
+ }
+ cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
+
+ rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
+ if (rc)
+ goto free_aead;
+
+ rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
+ if (rc)
+ goto free_aead;
+
+ return 0;
+free_aead:
+ crypto_free_aead(offload_ctx->aead_send);
+err_out:
+ return rc;
+}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
new file mode 100644
index 000000000..338a443fa
--- /dev/null
+++ b/net/tls/tls_main.c
@@ -0,0 +1,1246 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#include <net/tcp.h>
+#include <net/inet_common.h>
+#include <linux/highmem.h>
+#include <linux/netdevice.h>
+#include <linux/sched/signal.h>
+#include <linux/inetdevice.h>
+#include <linux/inet_diag.h>
+
+#include <net/snmp.h>
+#include <net/tls.h>
+#include <net/tls_toe.h>
+
+#include "tls.h"
+
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_DESCRIPTION("Transport Layer Security Support");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS_TCP_ULP("tls");
+
+enum {
+ TLSV4,
+ TLSV6,
+ TLS_NUM_PROTS,
+};
+
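+/* Expand the per-cipher uapi size defines (IV, key, salt, tag, rec_seq)
+ * into a struct tls_cipher_size_desc entry.
+ */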
+#define CIPHER_SIZE_DESC(cipher) [cipher] = { \
+ .iv = cipher ## _IV_SIZE, \
+ .key = cipher ## _KEY_SIZE, \
+ .salt = cipher ## _SALT_SIZE, \
+ .tag = cipher ## _TAG_SIZE, \
+ .rec_seq = cipher ## _REC_SEQ_SIZE, \
+}
+
+const struct tls_cipher_size_desc tls_cipher_size_desc[] = {
+ CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128),
+ CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256),
+ CIPHER_SIZE_DESC(TLS_CIPHER_AES_CCM_128),
+ CIPHER_SIZE_DESC(TLS_CIPHER_CHACHA20_POLY1305),
+ CIPHER_SIZE_DESC(TLS_CIPHER_SM4_GCM),
+ CIPHER_SIZE_DESC(TLS_CIPHER_SM4_CCM),
+};
+
+static const struct proto *saved_tcpv6_prot;
+static DEFINE_MUTEX(tcpv6_prot_mutex);
+static const struct proto *saved_tcpv4_prot;
+static DEFINE_MUTEX(tcpv4_prot_mutex);
+static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
+static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto *base);
+
+void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+{
+ int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+
+ WRITE_ONCE(sk->sk_prot,
+ &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+ WRITE_ONCE(sk->sk_socket->ops,
+ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+}
+
+int wait_on_pending_writer(struct sock *sk, long *timeo)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret, rc = 0;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+ if (!*timeo) {
+ rc = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ rc = sock_intr_errno(*timeo);
+ break;
+ }
+
+ ret = sk_wait_event(sk, timeo,
+ !READ_ONCE(sk->sk_write_pending), &wait);
+ if (ret) {
+ if (ret < 0)
+ rc = ret;
+ break;
+ }
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return rc;
+}
+
+int tls_push_sg(struct sock *sk,
+ struct tls_context *ctx,
+ struct scatterlist *sg,
+ u16 first_offset,
+ int flags)
+{
+ int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
+ int ret = 0;
+ struct page *p;
+ size_t size;
+ int offset = first_offset;
+
+ size = sg->length - offset;
+ offset += sg->offset;
+
+ ctx->in_tcp_sendpages = true;
+ while (1) {
+ if (sg_is_last(sg))
+ sendpage_flags = flags;
+
+ /* is sending application-limited? */
+ tcp_rate_check_app_limited(sk);
+ p = sg_page(sg);
+retry:
+ ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
+
+ if (ret != size) {
+ if (ret > 0) {
+ offset += ret;
+ size -= ret;
+ goto retry;
+ }
+
+ offset -= sg->offset;
+ ctx->partially_sent_offset = offset;
+ ctx->partially_sent_record = (void *)sg;
+ ctx->in_tcp_sendpages = false;
+ return ret;
+ }
+
+ put_page(p);
+ sk_mem_uncharge(sk, sg->length);
+ sg = sg_next(sg);
+ if (!sg)
+ break;
+
+ offset = sg->offset;
+ size = sg->length;
+ }
+
+ ctx->in_tcp_sendpages = false;
+
+ return 0;
+}
+
+static int tls_handle_open_record(struct sock *sk, int flags)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (tls_is_pending_open_record(ctx))
+ return ctx->push_pending_record(sk, flags);
+
+ return 0;
+}
+
+int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
+{
+ struct cmsghdr *cmsg;
+ int rc = -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_TLS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case TLS_SET_RECORD_TYPE:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
+ return -EINVAL;
+
+ if (msg->msg_flags & MSG_MORE)
+ return -EINVAL;
+
+ rc = tls_handle_open_record(sk, msg->msg_flags);
+ if (rc)
+ return rc;
+
+ *record_type = *(unsigned char *)CMSG_DATA(cmsg);
+ rc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags)
+{
+ struct scatterlist *sg;
+ u16 offset;
+
+ sg = ctx->partially_sent_record;
+ offset = ctx->partially_sent_offset;
+
+ ctx->partially_sent_record = NULL;
+ return tls_push_sg(sk, ctx, sg, offset, flags);
+}
+
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+ struct scatterlist *sg;
+
+ for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
+ put_page(sg_page(sg));
+ sk_mem_uncharge(sk, sg->length);
+ }
+ ctx->partially_sent_record = NULL;
+}
+
+static void tls_write_space(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+	/* If in_tcp_sendpages is set, call the lower protocol's write space
+	 * handler to ensure we wake up any waiting operations there, for
+	 * example if do_tcp_sendpages were to call sk_wait_event.
+	 */
+ if (ctx->in_tcp_sendpages) {
+ ctx->sk_write_space(sk);
+ return;
+ }
+
+#ifdef CONFIG_TLS_DEVICE
+ if (ctx->tx_conf == TLS_HW)
+ tls_device_write_space(sk, ctx);
+ else
+#endif
+ tls_sw_write_space(sk, ctx);
+
+ ctx->sk_write_space(sk);
+}
+
+/**
+ * tls_ctx_free() - free TLS ULP context
+ * @sk: socket to which @ctx is attached
+ * @ctx: TLS context structure
+ *
+ * Free TLS context. If @sk is %NULL caller guarantees that the socket
+ * to which @ctx was attached has no outstanding references.
+ */
+void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
+{
+ if (!ctx)
+ return;
+
+ memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+ memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+ mutex_destroy(&ctx->tx_lock);
+
+ if (sk)
+ kfree_rcu(ctx, rcu);
+ else
+ kfree(ctx);
+}
+
+static void tls_sk_proto_cleanup(struct sock *sk,
+ struct tls_context *ctx, long timeo)
+{
+ if (unlikely(sk->sk_write_pending) &&
+ !wait_on_pending_writer(sk, &timeo))
+ tls_handle_open_record(sk, 0);
+
+ /* We need these for tls_sw_fallback handling of other packets */
+ if (ctx->tx_conf == TLS_SW) {
+ kfree(ctx->tx.rec_seq);
+ kfree(ctx->tx.iv);
+ tls_sw_release_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
+ } else if (ctx->tx_conf == TLS_HW) {
+ tls_device_free_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
+ }
+
+ if (ctx->rx_conf == TLS_SW) {
+ tls_sw_release_resources_rx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ } else if (ctx->rx_conf == TLS_HW) {
+ tls_device_offload_cleanup_rx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ }
+}
+
+static void tls_sk_proto_close(struct sock *sk, long timeout)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx = tls_get_ctx(sk);
+ long timeo = sock_sndtimeo(sk, 0);
+ bool free_ctx;
+
+ if (ctx->tx_conf == TLS_SW)
+ tls_sw_cancel_work_tx(ctx);
+
+ lock_sock(sk);
+ free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
+
+ if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
+ tls_sk_proto_cleanup(sk, ctx, timeo);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (free_ctx)
+ rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+ WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
+ if (sk->sk_write_space == tls_write_space)
+ sk->sk_write_space = ctx->sk_write_space;
+ write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
+ if (ctx->tx_conf == TLS_SW)
+ tls_sw_free_ctx_tx(ctx);
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ tls_sw_strparser_done(ctx);
+ if (ctx->rx_conf == TLS_SW)
+ tls_sw_free_ctx_rx(ctx);
+ ctx->sk_proto->close(sk, timeout);
+
+ if (free_ctx)
+ tls_ctx_free(sk, ctx);
+}
+
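+/* getsockopt(SOL_TLS, TLS_TX/TLS_RX) returns the crypto_info that was
+ * installed via setsockopt, with the IV and record sequence number
+ * reflecting their current values.
+ */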
+static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ int __user *optlen, int tx)
+{
+ int rc = 0;
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_crypto_info *crypto_info;
+ struct cipher_context *cctx;
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (!optval || (len < sizeof(*crypto_info))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!ctx) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* get user crypto info */
+ if (tx) {
+ crypto_info = &ctx->crypto_send.info;
+ cctx = &ctx->tx;
+ } else {
+ crypto_info = &ctx->crypto_recv.info;
+ cctx = &ctx->rx;
+ }
+
+ if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (len == sizeof(*crypto_info)) {
+ if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
+ rc = -EFAULT;
+ goto out;
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *
+ crypto_info_aes_gcm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_gcm_128,
+ info);
+
+ if (len != sizeof(*crypto_info_aes_gcm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(crypto_info_aes_gcm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+ if (copy_to_user(optval,
+ crypto_info_aes_gcm_128,
+ sizeof(*crypto_info_aes_gcm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *
+ crypto_info_aes_gcm_256 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_gcm_256,
+ info);
+
+ if (len != sizeof(*crypto_info_aes_gcm_256)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(crypto_info_aes_gcm_256->iv,
+ cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ TLS_CIPHER_AES_GCM_256_IV_SIZE);
+ memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
+ if (copy_to_user(optval,
+ crypto_info_aes_gcm_256,
+ sizeof(*crypto_info_aes_gcm_256)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_ccm_128, info);
+
+ if (len != sizeof(*aes_ccm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(aes_ccm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+ if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_chacha20_poly1305,
+ info);
+
+ if (len != sizeof(*chacha20_poly1305)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(chacha20_poly1305->iv,
+ cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+ if (copy_to_user(optval, chacha20_poly1305,
+ sizeof(*chacha20_poly1305)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_gcm, info);
+
+ if (len != sizeof(*sm4_gcm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(sm4_gcm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ TLS_CIPHER_SM4_GCM_IV_SIZE);
+ memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+ if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_ccm, info);
+
+ if (len != sizeof(*sm4_ccm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(sm4_ccm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ TLS_CIPHER_SM4_CCM_IV_SIZE);
+ memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+ if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_ARIA_GCM_128: {
+ struct tls12_crypto_info_aria_gcm_128 *
+ crypto_info_aria_gcm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aria_gcm_128,
+ info);
+
+ if (len != sizeof(*crypto_info_aria_gcm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(crypto_info_aria_gcm_128->iv,
+ cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
+ TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
+ memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
+ if (copy_to_user(optval,
+ crypto_info_aria_gcm_128,
+ sizeof(*crypto_info_aria_gcm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_ARIA_GCM_256: {
+ struct tls12_crypto_info_aria_gcm_256 *
+ crypto_info_aria_gcm_256 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aria_gcm_256,
+ info);
+
+ if (len != sizeof(*crypto_info_aria_gcm_256)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(crypto_info_aria_gcm_256->iv,
+ cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
+ TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
+ memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
+ if (copy_to_user(optval,
+ crypto_info_aria_gcm_256,
+ sizeof(*crypto_info_aria_gcm_256)))
+ rc = -EFAULT;
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ }
+
+out:
+ return rc;
+}
+
+static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ unsigned int value;
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (len != sizeof(value))
+ return -EINVAL;
+
+ value = ctx->zerocopy_sendfile;
+ if (copy_to_user(optval, &value, sizeof(value)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
+ int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ int value, len;
+
+ if (ctx->prot_info.version != TLS_1_3_VERSION)
+ return -EINVAL;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < sizeof(value))
+ return -EINVAL;
+
+ value = -EINVAL;
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ value = ctx->rx_no_pad;
+ if (value < 0)
+ return value;
+
+ if (put_user(sizeof(value), optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &value, sizeof(value)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_tls_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
+{
+ int rc = 0;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case TLS_TX:
+ case TLS_RX:
+ rc = do_tls_getsockopt_conf(sk, optval, optlen,
+ optname == TLS_TX);
+ break;
+ case TLS_TX_ZEROCOPY_RO:
+ rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
+ break;
+ case TLS_RX_EXPECT_NO_PAD:
+ rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
+ break;
+ default:
+ rc = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+
+ return rc;
+}
+
+static int tls_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->sk_proto->getsockopt(sk, level,
+ optname, optval, optlen);
+
+ return do_tls_getsockopt(sk, optname, optval, optlen);
+}
+
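+/* Typical userspace flow (sketch): once the TLS handshake has completed on
+ * a TCP socket, the application enables the ULP and then installs the
+ * negotiated key material, e.g.
+ *
+ *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
+ *	setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
+ *
+ * where crypto_info is for instance a struct tls12_crypto_info_aes_gcm_128.
+ * do_tls_setsockopt_conf() handles the second call for both TLS_TX and
+ * TLS_RX.
+ */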
+static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
+ unsigned int optlen, int tx)
+{
+ struct tls_crypto_info *crypto_info;
+ struct tls_crypto_info *alt_crypto_info;
+ struct tls_context *ctx = tls_get_ctx(sk);
+ size_t optsize;
+ int rc = 0;
+ int conf;
+
+ if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
+ return -EINVAL;
+
+ if (tx) {
+ crypto_info = &ctx->crypto_send.info;
+ alt_crypto_info = &ctx->crypto_recv.info;
+ } else {
+ crypto_info = &ctx->crypto_recv.info;
+ alt_crypto_info = &ctx->crypto_send.info;
+ }
+
+	/* Currently we don't support setting the crypto info more than once */
+ if (TLS_CRYPTO_INFO_READY(crypto_info))
+ return -EBUSY;
+
+ rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
+ if (rc) {
+ rc = -EFAULT;
+ goto err_crypto_info;
+ }
+
+ /* check version */
+ if (crypto_info->version != TLS_1_2_VERSION &&
+ crypto_info->version != TLS_1_3_VERSION) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+
+	/* Ensure that the TLS version and cipher are the same in both directions */
+ if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
+ if (alt_crypto_info->version != crypto_info->version ||
+ alt_crypto_info->cipher_type != crypto_info->cipher_type) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
+ break;
+ case TLS_CIPHER_AES_GCM_256: {
+ optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
+ break;
+ }
+ case TLS_CIPHER_AES_CCM_128:
+ optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
+ break;
+ case TLS_CIPHER_CHACHA20_POLY1305:
+ optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+ break;
+ case TLS_CIPHER_SM4_GCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+ break;
+ case TLS_CIPHER_ARIA_GCM_128:
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+ optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
+ break;
+ case TLS_CIPHER_ARIA_GCM_256:
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+ optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
+ break;
+ default:
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+
+ if (optlen != optsize) {
+ rc = -EINVAL;
+ goto err_crypto_info;
+ }
+
+ rc = copy_from_sockptr_offset(crypto_info + 1, optval,
+ sizeof(*crypto_info),
+ optlen - sizeof(*crypto_info));
+ if (rc) {
+ rc = -EFAULT;
+ goto err_crypto_info;
+ }
+
+ if (tx) {
+ rc = tls_set_device_offload(sk, ctx);
+ conf = TLS_HW;
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
+ } else {
+ rc = tls_set_sw_offload(sk, ctx, 1);
+ if (rc)
+ goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
+ conf = TLS_SW;
+ }
+ } else {
+ rc = tls_set_device_offload_rx(sk, ctx);
+ conf = TLS_HW;
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ } else {
+ rc = tls_set_sw_offload(sk, ctx, 0);
+ if (rc)
+ goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ conf = TLS_SW;
+ }
+ tls_sw_strparser_arm(sk, ctx);
+ }
+
+ if (tx)
+ ctx->tx_conf = conf;
+ else
+ ctx->rx_conf = conf;
+ update_sk_prot(sk, ctx);
+ if (tx) {
+ ctx->sk_write_space = sk->sk_write_space;
+ sk->sk_write_space = tls_write_space;
+ } else {
+ struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);
+
+ tls_strp_check_rcv(&rx_ctx->strp);
+ }
+ return 0;
+
+err_crypto_info:
+ memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
+ return rc;
+}
+
+static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ unsigned int value;
+
+ if (sockptr_is_null(optval) || optlen != sizeof(value))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&value, optval, sizeof(value)))
+ return -EFAULT;
+
+ if (value > 1)
+ return -EINVAL;
+
+ ctx->zerocopy_sendfile = value;
+
+ return 0;
+}
+
+static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ u32 val;
+ int rc;
+
+ if (ctx->prot_info.version != TLS_1_3_VERSION ||
+ sockptr_is_null(optval) || optlen < sizeof(val))
+ return -EINVAL;
+
+ rc = copy_from_sockptr(&val, optval, sizeof(val));
+ if (rc)
+ return -EFAULT;
+ if (val > 1)
+ return -EINVAL;
+ rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
+ if (rc < 1)
+ return rc == 0 ? -EINVAL : rc;
+
+ lock_sock(sk);
+ rc = -EINVAL;
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
+ ctx->rx_no_pad = val;
+ tls_update_rx_zc_capable(ctx);
+ rc = 0;
+ }
+ release_sock(sk);
+
+ return rc;
+}
+
+static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
+ unsigned int optlen)
+{
+ int rc = 0;
+
+ switch (optname) {
+ case TLS_TX:
+ case TLS_RX:
+ lock_sock(sk);
+ rc = do_tls_setsockopt_conf(sk, optval, optlen,
+ optname == TLS_TX);
+ release_sock(sk);
+ break;
+ case TLS_TX_ZEROCOPY_RO:
+ lock_sock(sk);
+ rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
+ release_sock(sk);
+ break;
+ case TLS_RX_EXPECT_NO_PAD:
+ rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
+ break;
+ default:
+ rc = -ENOPROTOOPT;
+ break;
+ }
+ return rc;
+}
+
+static int tls_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (level != SOL_TLS)
+ return ctx->sk_proto->setsockopt(sk, level, optname, optval,
+ optlen);
+
+ return do_tls_setsockopt(sk, optname, optval, optlen);
+}
+
+struct tls_context *tls_ctx_create(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return NULL;
+
+ mutex_init(&ctx->tx_lock);
+ rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ ctx->sk_proto = READ_ONCE(sk->sk_prot);
+ ctx->sk = sk;
+ return ctx;
+}
+
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto_ops *base)
+{
+ ops[TLS_BASE][TLS_BASE] = *base;
+
+ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
+
+ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
+ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
+
+ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
+ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
+
+ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
+
+ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
+
+ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
+ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
+static void tls_build_proto(struct sock *sk)
+{
+ int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+ struct proto *prot = READ_ONCE(sk->sk_prot);
+
+	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
+ if (ip_ver == TLSV6 &&
+ unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ mutex_lock(&tcpv6_prot_mutex);
+ if (likely(prot != saved_tcpv6_prot)) {
+ build_protos(tls_prots[TLSV6], prot);
+ build_proto_ops(tls_proto_ops[TLSV6],
+ sk->sk_socket->ops);
+ smp_store_release(&saved_tcpv6_prot, prot);
+ }
+ mutex_unlock(&tcpv6_prot_mutex);
+ }
+
+ if (ip_ver == TLSV4 &&
+ unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ mutex_lock(&tcpv4_prot_mutex);
+ if (likely(prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], prot);
+ build_proto_ops(tls_proto_ops[TLSV4],
+ sk->sk_socket->ops);
+ smp_store_release(&saved_tcpv4_prot, prot);
+ }
+ mutex_unlock(&tcpv4_prot_mutex);
+ }
+}
+
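+/* The proto tables are indexed as [tx_conf][rx_conf]; every entry starts
+ * as a copy of the base TCP proto with the TLS callbacks layered on top.
+ */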
+static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto *base)
+{
+ prot[TLS_BASE][TLS_BASE] = *base;
+ prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
+ prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
+ prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
+
+ prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+ prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
+ prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
+
+ prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
+ prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
+ prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
+ prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
+
+ prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
+ prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
+ prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
+ prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
+
+#ifdef CONFIG_TLS_DEVICE
+ prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
+ prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
+ prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
+
+ prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
+ prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
+ prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
+
+ prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
+
+ prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
+
+ prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
+#endif
+#ifdef CONFIG_TLS_TOE
+ prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
+ prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
+#endif
+}
+
+static int tls_init(struct sock *sk)
+{
+ struct tls_context *ctx;
+ int rc = 0;
+
+ tls_build_proto(sk);
+
+#ifdef CONFIG_TLS_TOE
+ if (tls_toe_bypass(sk))
+ return 0;
+#endif
+
+ /* The TLS ulp is currently supported only for TCP sockets
+ * in ESTABLISHED state.
+ * Supporting sockets in LISTEN state will require us
+	 * to modify the accept implementation to clone rather than
+ * share the ulp context.
+ */
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ /* allocate tls context */
+ write_lock_bh(&sk->sk_callback_lock);
+ ctx = tls_ctx_create(sk);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->tx_conf = TLS_BASE;
+ ctx->rx_conf = TLS_BASE;
+ update_sk_prot(sk, ctx);
+out:
+ write_unlock_bh(&sk->sk_callback_lock);
+ return rc;
+}
+
+static void tls_update(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk))
+{
+ struct tls_context *ctx;
+
+ WARN_ON_ONCE(sk->sk_prot == p);
+
+ ctx = tls_get_ctx(sk);
+ if (likely(ctx)) {
+ ctx->sk_write_space = write_space;
+ ctx->sk_proto = p;
+ } else {
+ /* Pairs with lockless read in sk_clone_lock(). */
+ WRITE_ONCE(sk->sk_prot, p);
+ sk->sk_write_space = write_space;
+ }
+}
+
+static u16 tls_user_config(struct tls_context *ctx, bool tx)
+{
+ u16 config = tx ? ctx->tx_conf : ctx->rx_conf;
+
+ switch (config) {
+ case TLS_BASE:
+ return TLS_CONF_BASE;
+ case TLS_SW:
+ return TLS_CONF_SW;
+ case TLS_HW:
+ return TLS_CONF_HW;
+ case TLS_HW_RECORD:
+ return TLS_CONF_HW_RECORD;
+ }
+ return 0;
+}
+
+static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
+{
+ u16 version, cipher_type;
+ struct tls_context *ctx;
+ struct nlattr *start;
+ int err;
+
+ start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
+ if (!start)
+ return -EMSGSIZE;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
+ if (!ctx) {
+ err = 0;
+ goto nla_failure;
+ }
+ version = ctx->prot_info.version;
+ if (version) {
+ err = nla_put_u16(skb, TLS_INFO_VERSION, version);
+ if (err)
+ goto nla_failure;
+ }
+ cipher_type = ctx->prot_info.cipher_type;
+ if (cipher_type) {
+ err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
+ if (err)
+ goto nla_failure;
+ }
+ err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
+ if (err)
+ goto nla_failure;
+
+ err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
+ if (err)
+ goto nla_failure;
+
+ if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
+ err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
+ if (err)
+ goto nla_failure;
+ }
+ if (ctx->rx_no_pad) {
+ err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
+ if (err)
+ goto nla_failure;
+ }
+
+ rcu_read_unlock();
+ nla_nest_end(skb, start);
+ return 0;
+
+nla_failure:
+ rcu_read_unlock();
+ nla_nest_cancel(skb, start);
+ return err;
+}
+
+static size_t tls_get_info_size(const struct sock *sk)
+{
+ size_t size = 0;
+
+ size += nla_total_size(0) + /* INET_ULP_INFO_TLS */
+ nla_total_size(sizeof(u16)) + /* TLS_INFO_VERSION */
+ nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */
+ nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */
+ nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */
+ nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */
+ nla_total_size(0) + /* TLS_INFO_RX_NO_PAD */
+ 0;
+
+ return size;
+}
+
+static int __net_init tls_init_net(struct net *net)
+{
+ int err;
+
+ net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
+ if (!net->mib.tls_statistics)
+ return -ENOMEM;
+
+ err = tls_proc_init(net);
+ if (err)
+ goto err_free_stats;
+
+ return 0;
+err_free_stats:
+ free_percpu(net->mib.tls_statistics);
+ return err;
+}
+
+static void __net_exit tls_exit_net(struct net *net)
+{
+ tls_proc_fini(net);
+ free_percpu(net->mib.tls_statistics);
+}
+
+static struct pernet_operations tls_proc_ops = {
+ .init = tls_init_net,
+ .exit = tls_exit_net,
+};
+
+static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
+ .name = "tls",
+ .owner = THIS_MODULE,
+ .init = tls_init,
+ .update = tls_update,
+ .get_info = tls_get_info,
+ .get_info_size = tls_get_info_size,
+};
+
+static int __init tls_register(void)
+{
+ int err;
+
+ err = register_pernet_subsys(&tls_proc_ops);
+ if (err)
+ return err;
+
+ err = tls_strp_dev_init();
+ if (err)
+ goto err_pernet;
+
+ err = tls_device_init();
+ if (err)
+ goto err_strp;
+
+ tcp_register_ulp(&tcp_tls_ulp_ops);
+
+ return 0;
+err_strp:
+ tls_strp_dev_exit();
+err_pernet:
+ unregister_pernet_subsys(&tls_proc_ops);
+ return err;
+}
+
+static void __exit tls_unregister(void)
+{
+ tcp_unregister_ulp(&tcp_tls_ulp_ops);
+ tls_strp_dev_exit();
+ tls_device_cleanup();
+ unregister_pernet_subsys(&tls_proc_ops);
+}
+
+module_init(tls_register);
+module_exit(tls_unregister);
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
new file mode 100644
index 000000000..68982728f
--- /dev/null
+++ b/net/tls/tls_proc.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <net/snmp.h>
+#include <net/tls.h>
+
+#include "tls.h"
+
+#ifdef CONFIG_PROC_FS
+static const struct snmp_mib tls_mib_list[] = {
+ SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
+ SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
+ SNMP_MIB_ITEM("TlsCurrTxDevice", LINUX_MIB_TLSCURRTXDEVICE),
+ SNMP_MIB_ITEM("TlsCurrRxDevice", LINUX_MIB_TLSCURRRXDEVICE),
+ SNMP_MIB_ITEM("TlsTxSw", LINUX_MIB_TLSTXSW),
+ SNMP_MIB_ITEM("TlsRxSw", LINUX_MIB_TLSRXSW),
+ SNMP_MIB_ITEM("TlsTxDevice", LINUX_MIB_TLSTXDEVICE),
+ SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
+ SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
+ SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
+ SNMP_MIB_ITEM("TlsDecryptRetry", LINUX_MIB_TLSDECRYPTRETRY),
+ SNMP_MIB_ITEM("TlsRxNoPadViolation", LINUX_MIB_TLSRXNOPADVIOL),
+ SNMP_MIB_SENTINEL
+};
+
+static int tls_statistics_seq_show(struct seq_file *seq, void *v)
+{
+ unsigned long buf[LINUX_MIB_TLSMAX] = {};
+ struct net *net = seq->private;
+ int i;
+
+ snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
+ for (i = 0; tls_mib_list[i].name; i++)
+ seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
+
+ return 0;
+}
+#endif
+
+int __net_init tls_proc_init(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
+ tls_statistics_seq_show, NULL))
+ return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+
+ return 0;
+}
+
+void __net_exit tls_proc_fini(struct net *net)
+{
+ remove_proc_entry("tls_stat", net->proc_net);
+}
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
new file mode 100644
index 000000000..f37f4a0fc
--- /dev/null
+++ b/net/tls/tls_strp.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/strparser.h>
+#include <net/tcp.h>
+#include <net/sock.h>
+#include <net/tls.h>
+
+#include "tls.h"
+
+static struct workqueue_struct *tls_strp_wq;
+
+static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+{
+ if (strp->stopped)
+ return;
+
+ strp->stopped = 1;
+
+ /* Report an error on the lower socket */
+ WRITE_ONCE(strp->sk->sk_err, -err);
+ /* Paired with smp_rmb() in tcp_poll() */
+ smp_wmb();
+ sk_error_report(strp->sk);
+}
+
+static void tls_strp_anchor_free(struct tls_strparser *strp)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
+
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
+ if (!strp->copy_mode)
+ shinfo->frag_list = NULL;
+ consume_skb(strp->anchor);
+ strp->anchor = NULL;
+}
+
+static struct sk_buff *
+tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
+ int offset, int len)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
+ &err, strp->sk->sk_allocation);
+ if (!skb)
+ return NULL;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
+ skb_frag_address(frag),
+ skb_frag_size(frag)));
+ offset += skb_frag_size(frag);
+ }
+
+ skb->len = len;
+ skb->data_len = len;
+ skb_copy_header(skb, in_skb);
+ return skb;
+}
+
+/* Create a new skb with the contents of input copied to its page frags */
+static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+{
+ struct strp_msg *rxm;
+ struct sk_buff *skb;
+
+ skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
+ strp->stm.full_len);
+ if (!skb)
+ return NULL;
+
+ rxm = strp_msg(skb);
+ rxm->offset = 0;
+ return skb;
+}
+
+/* Steal the input skb; the input msg is invalid after calling this function */
+struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
+{
+ struct tls_strparser *strp = &ctx->strp;
+
+#ifdef CONFIG_TLS_DEVICE
+ DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
+#else
+ /* This function turns an input into an output,
+ * that can only happen if we have offload.
+ */
+ WARN_ON(1);
+#endif
+
+ if (strp->copy_mode) {
+ struct sk_buff *skb;
+
+		/* Replace the anchor with an empty skb; this is a little
+		 * dangerous, but __tls_cur_msg() warns on empty skbs
+		 * so hopefully we'll catch abuses.
+		 */
+ skb = alloc_skb(0, strp->sk->sk_allocation);
+ if (!skb)
+ return NULL;
+
+ swap(strp->anchor, skb);
+ return skb;
+ }
+
+ return tls_strp_msg_make_copy(strp);
+}
+
+/* Force the input skb to be in copy mode. The data ownership remains
+ * with the input skb itself (meaning unpause will wipe it) but it can
+ * be modified.
+ */
+int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
+{
+ struct tls_strparser *strp = &ctx->strp;
+ struct sk_buff *skb;
+
+ if (strp->copy_mode)
+ return 0;
+
+ skb = tls_strp_msg_make_copy(strp);
+ if (!skb)
+ return -ENOMEM;
+
+ tls_strp_anchor_free(strp);
+ strp->anchor = skb;
+
+ tcp_read_done(strp->sk, strp->stm.full_len);
+ strp->copy_mode = 1;
+
+ return 0;
+}
+
+/* Make a clone (in the skb sense) of the input msg to keep a reference
+ * to the underlying data. The reference-holding skbs get placed on
+ * @dst.
+ */
+int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
+
+ if (strp->copy_mode) {
+ struct sk_buff *skb;
+
+ WARN_ON_ONCE(!shinfo->nr_frags);
+
+ /* We can't skb_clone() the anchor, it gets wiped by unpause */
+ skb = alloc_skb(0, strp->sk->sk_allocation);
+ if (!skb)
+ return -ENOMEM;
+
+ __skb_queue_tail(dst, strp->anchor);
+ strp->anchor = skb;
+ } else {
+ struct sk_buff *iter, *clone;
+ int chunk, len, offset;
+
+ offset = strp->stm.offset;
+ len = strp->stm.full_len;
+ iter = shinfo->frag_list;
+
+ while (len > 0) {
+ if (iter->len <= offset) {
+ offset -= iter->len;
+ goto next;
+ }
+
+ chunk = iter->len - offset;
+ offset = 0;
+
+ clone = skb_clone(iter, strp->sk->sk_allocation);
+ if (!clone)
+ return -ENOMEM;
+ __skb_queue_tail(dst, clone);
+
+ len -= chunk;
+next:
+ iter = iter->next;
+ }
+ }
+
+ return 0;
+}
+
+static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
+ int i;
+
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
+
+ for (i = 0; i < shinfo->nr_frags; i++)
+ __skb_frag_unref(&shinfo->frags[i], false);
+ shinfo->nr_frags = 0;
+ if (strp->copy_mode) {
+ kfree_skb_list(shinfo->frag_list);
+ shinfo->frag_list = NULL;
+ }
+ strp->copy_mode = 0;
+ strp->mixed_decrypted = 0;
+}
+
+static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+{
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+ frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
+
+ len = in_len;
+ /* First make sure we got the header */
+ if (!strp->stm.full_len) {
+ /* Assume one page is more than enough for headers */
+ chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
+ WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
+ skb_frag_address(frag) +
+ skb_frag_size(frag),
+ chunk));
+
+ skb->len += chunk;
+ skb->data_len += chunk;
+ skb_frag_size_add(frag, chunk);
+
+ sz = tls_rx_msg_size(strp, skb);
+ if (sz < 0)
+ return sz;
+
+ /* We may have over-read, sz == 0 is guaranteed under-read */
+ if (unlikely(sz && sz < skb->len)) {
+ int over = skb->len - sz;
+
+ WARN_ON_ONCE(over > chunk);
+ skb->len -= over;
+ skb->data_len -= over;
+ skb_frag_size_add(frag, -over);
+
+ chunk -= over;
+ }
+
+ frag++;
+ len -= chunk;
+ offset += chunk;
+
+ strp->stm.full_len = sz;
+ if (!strp->stm.full_len)
+ goto read_done;
+ }
+
+ /* Load up more data */
+ while (len && strp->stm.full_len > skb->len) {
+ chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
+ chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
+ WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
+ skb_frag_address(frag) +
+ skb_frag_size(frag),
+ chunk));
+
+ skb->len += chunk;
+ skb->data_len += chunk;
+ skb_frag_size_add(frag, chunk);
+ frag++;
+ len -= chunk;
+ offset += chunk;
+ }
+
+read_done:
+ return in_len - len;
+}
+
+static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+{
+ struct sk_buff *nskb, *first, *last;
+ struct skb_shared_info *shinfo;
+ size_t chunk;
+ int sz;
+
+ if (strp->stm.full_len)
+ chunk = strp->stm.full_len - skb->len;
+ else
+ chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+ chunk = min(chunk, in_len);
+
+ nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
+ if (!nskb)
+ return -ENOMEM;
+
+ shinfo = skb_shinfo(skb);
+ if (!shinfo->frag_list) {
+ shinfo->frag_list = nskb;
+ nskb->prev = nskb;
+ } else {
+ first = shinfo->frag_list;
+ last = first->prev;
+ last->next = nskb;
+ first->prev = nskb;
+ }
+
+ skb->len += chunk;
+ skb->data_len += chunk;
+
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, skb);
+ if (sz < 0)
+ return sz;
+
+ /* We may have over-read, sz == 0 is guaranteed under-read */
+ if (unlikely(sz && sz < skb->len)) {
+ int over = skb->len - sz;
+
+ WARN_ON_ONCE(over > chunk);
+ skb->len -= over;
+ skb->data_len -= over;
+ __pskb_trim(nskb, nskb->len - over);
+
+ chunk -= over;
+ }
+
+ strp->stm.full_len = sz;
+ }
+
+ return chunk;
+}
+
+static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+ unsigned int offset, size_t in_len)
+{
+ struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
+ struct sk_buff *skb;
+ int ret;
+
+ if (strp->msg_ready)
+ return 0;
+
+ skb = strp->anchor;
+ if (!skb->len)
+ skb_copy_decrypted(skb, in_skb);
+ else
+ strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
+ ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
+ else
+ ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
+ if (ret < 0) {
+ desc->error = ret;
+ ret = 0;
+ }
+
+ if (strp->stm.full_len && strp->stm.full_len == skb->len) {
+ desc->count = 0;
+
+ strp->msg_ready = 1;
+ tls_rx_msg_ready(strp);
+ }
+
+ return ret;
+}
+
+static int tls_strp_read_copyin(struct tls_strparser *strp)
+{
+ struct socket *sock = strp->sk->sk_socket;
+ read_descriptor_t desc;
+
+ desc.arg.data = strp;
+ desc.error = 0;
+ desc.count = 1; /* give more than one skb per call */
+
+ /* sk should be locked here, so okay to do read_sock */
+ sock->ops->read_sock(strp->sk, &desc, tls_strp_copyin);
+
+ return desc.error;
+}
+
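+/* Switch the parser to copy mode: the record data is copied out of the
+ * TCP receive queue into page frags owned by the anchor skb, instead of
+ * being referenced in place.
+ */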
+static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
+{
+ struct skb_shared_info *shinfo;
+ struct page *page;
+ int need_spc, len;
+
+	/* If the receive buffer is small or the receive window has collapsed
+	 * to 0 we need to read the data out, otherwise the connection will
+	 * stall. Without memory pressure a threshold of INT_MAX is never
+	 * reached, so tcp_epollin_ready() only triggers in those cases.
+	 */
+ if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
+ return 0;
+
+ shinfo = skb_shinfo(strp->anchor);
+ shinfo->frag_list = NULL;
+
+	/* If we don't know the length, go for the max payload plus a page for cipher overhead */
+ need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+
+ for (len = need_spc; len > 0; len -= PAGE_SIZE) {
+ page = alloc_page(strp->sk->sk_allocation);
+ if (!page) {
+ tls_strp_flush_anchor_copy(strp);
+ return -ENOMEM;
+ }
+
+ skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
+ page, 0, 0);
+ }
+
+ strp->copy_mode = 1;
+ strp->stm.offset = 0;
+
+ strp->anchor->len = 0;
+ strp->anchor->data_len = 0;
+ strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);
+
+ tls_strp_read_copyin(strp);
+
+ return 0;
+}
+
+static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
+{
+ unsigned int len = strp->stm.offset + strp->stm.full_len;
+ struct sk_buff *first, *skb;
+ u32 seq;
+
+ first = skb_shinfo(strp->anchor)->frag_list;
+ skb = first;
+ seq = TCP_SKB_CB(first)->seq;
+
+ /* Make sure there's no duplicate data in the queue,
+ * and the decrypted status matches.
+ */
+ while (skb->len < len) {
+ seq += skb->len;
+ len -= skb->len;
+ skb = skb->next;
+
+ if (TCP_SKB_CB(skb)->seq != seq)
+ return false;
+ if (skb_cmp_decrypted(first, skb))
+ return false;
+ }
+
+ return true;
+}
+
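+/* Non-copy path: point the anchor skb's frag_list at the skbs already
+ * sitting in the TCP receive queue, starting at the current copied_seq.
+ */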
+static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
+{
+ struct tcp_sock *tp = tcp_sk(strp->sk);
+ struct sk_buff *first;
+ u32 offset;
+
+ first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
+ if (WARN_ON_ONCE(!first))
+ return;
+
+ /* Bestow the state onto the anchor */
+ strp->anchor->len = offset + len;
+ strp->anchor->data_len = offset + len;
+ strp->anchor->truesize = offset + len;
+
+ skb_shinfo(strp->anchor)->frag_list = first;
+
+ skb_copy_header(strp->anchor, first);
+ strp->anchor->destructor = NULL;
+
+ strp->stm.offset = offset;
+}
+
+void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+{
+ struct strp_msg *rxm;
+ struct tls_msg *tlm;
+
+ DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
+ DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
+
+ if (!strp->copy_mode && force_refresh) {
+ if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
+ return;
+
+ tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
+ }
+
+ rxm = strp_msg(strp->anchor);
+ rxm->full_len = strp->stm.full_len;
+ rxm->offset = strp->stm.offset;
+ tlm = tls_msg(strp->anchor);
+ tlm->control = strp->mark;
+}
+
+/* Called with lock held on lower socket */
+static int tls_strp_read_sock(struct tls_strparser *strp)
+{
+ int sz, inq;
+
+ inq = tcp_inq(strp->sk);
+ if (inq < 1)
+ return 0;
+
+ if (unlikely(strp->copy_mode))
+ return tls_strp_read_copyin(strp);
+
+ if (inq < strp->stm.full_len)
+ return tls_strp_read_copy(strp, true);
+
+ if (!strp->stm.full_len) {
+ tls_strp_load_anchor_with_queue(strp, inq);
+
+ sz = tls_rx_msg_size(strp, strp->anchor);
+ if (sz < 0) {
+ tls_strp_abort_strp(strp, sz);
+ return sz;
+ }
+
+ strp->stm.full_len = sz;
+
+ if (!strp->stm.full_len || inq < strp->stm.full_len)
+ return tls_strp_read_copy(strp, true);
+ }
+
+ if (!tls_strp_check_queue_ok(strp))
+ return tls_strp_read_copy(strp, false);
+
+ strp->msg_ready = 1;
+ tls_rx_msg_ready(strp);
+
+ return 0;
+}
+
+void tls_strp_check_rcv(struct tls_strparser *strp)
+{
+ if (unlikely(strp->stopped) || strp->msg_ready)
+ return;
+
+ if (tls_strp_read_sock(strp) == -ENOMEM)
+ queue_work(tls_strp_wq, &strp->work);
+}
+
+/* Lower sock lock held */
+void tls_strp_data_ready(struct tls_strparser *strp)
+{
+ /* This check is needed to synchronize with tls_strp_work.
+ * tls_strp_work acquires a process lock (lock_sock) whereas
+ * the lock held here is bh_lock_sock. The two locks can be
+ * held by different threads at the same time, but bh_lock_sock
+ * allows a thread in BH context to safely check if the process
+ * lock is held. In this case, if the lock is held, queue work.
+ */
+ if (sock_owned_by_user_nocheck(strp->sk)) {
+ queue_work(tls_strp_wq, &strp->work);
+ return;
+ }
+
+ tls_strp_check_rcv(strp);
+}
+
+static void tls_strp_work(struct work_struct *w)
+{
+ struct tls_strparser *strp =
+ container_of(w, struct tls_strparser, work);
+
+ lock_sock(strp->sk);
+ tls_strp_check_rcv(strp);
+ release_sock(strp->sk);
+}
+
+void tls_strp_msg_done(struct tls_strparser *strp)
+{
+ WARN_ON(!strp->stm.full_len);
+
+ if (likely(!strp->copy_mode))
+ tcp_read_done(strp->sk, strp->stm.full_len);
+ else
+ tls_strp_flush_anchor_copy(strp);
+
+ strp->msg_ready = 0;
+ memset(&strp->stm, 0, sizeof(strp->stm));
+
+ tls_strp_check_rcv(strp);
+}
+
+void tls_strp_stop(struct tls_strparser *strp)
+{
+ strp->stopped = 1;
+}
+
+int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
+{
+ memset(strp, 0, sizeof(*strp));
+
+ strp->sk = sk;
+
+ strp->anchor = alloc_skb(0, GFP_KERNEL);
+ if (!strp->anchor)
+ return -ENOMEM;
+
+ INIT_WORK(&strp->work, tls_strp_work);
+
+ return 0;
+}
+
+/* strp must already be stopped so that tls_strp_data_ready will no longer
+ * be called. Note that tls_strp_done is not called with the lower socket
+ * held.
+ */
+void tls_strp_done(struct tls_strparser *strp)
+{
+ WARN_ON(!strp->stopped);
+
+ cancel_work_sync(&strp->work);
+ tls_strp_anchor_free(strp);
+}
+
+int __init tls_strp_dev_init(void)
+{
+ tls_strp_wq = create_workqueue("tls-strp");
+ if (unlikely(!tls_strp_wq))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void tls_strp_dev_exit(void)
+{
+ destroy_workqueue(tls_strp_wq);
+}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
new file mode 100644
index 000000000..0323040d3
--- /dev/null
+++ b/net/tls/tls_sw.c
@@ -0,0 +1,2818 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
+ * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
+ * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
+ * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bug.h>
+#include <linux/sched/signal.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/splice.h>
+#include <crypto/aead.h>
+
+#include <net/strparser.h>
+#include <net/tls.h>
+
+#include "tls.h"
+
+struct tls_decrypt_arg {
+ struct_group(inargs,
+ bool zc;
+ bool async;
+ u8 tail;
+ );
+
+ struct sk_buff *skb;
+};
+
+struct tls_decrypt_ctx {
+ struct sock *sk;
+ u8 iv[MAX_IV_SIZE];
+ u8 aad[TLS_MAX_AAD_SIZE];
+ u8 tail;
+ struct scatterlist sg[];
+};
+
+noinline void tls_err_abort(struct sock *sk, int err)
+{
+ WARN_ON_ONCE(err >= 0);
+ /* sk->sk_err should contain a positive error code. */
+ WRITE_ONCE(sk->sk_err, -err);
+ /* Paired with smp_rmb() in tcp_poll() */
+ smp_wmb();
+ sk_error_report(sk);
+}
+
+static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+ unsigned int recursion_level)
+{
+ int start = skb_headlen(skb);
+ int i, chunk = start - offset;
+ struct sk_buff *frag_iter;
+ int elt = 0;
+
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+
+ if (unlikely(skb_has_frag_list(skb))) {
+ skb_walk_frags(skb, frag_iter) {
+ int end, ret;
+
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ ret = __skb_nsg(frag_iter, offset - start, chunk,
+ recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+ }
+ BUG_ON(len);
+ return elt;
+}
+
+/* Return the number of scatterlist elements required to completely map the
+ * skb, or -EMSGSIZE if the recursion depth is exceeded.
+ */
+static int skb_nsg(struct sk_buff *skb, int offset, int len)
+{
+ return __skb_nsg(skb, offset, len, 0);
+}
+
+static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
+ struct tls_decrypt_arg *darg)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ struct tls_msg *tlm = tls_msg(skb);
+ int sub = 0;
+
+ /* Determine zero-padding length */
+ if (prot->version == TLS_1_3_VERSION) {
+ int offset = rxm->full_len - TLS_TAG_SIZE - 1;
+ char content_type = darg->zc ? darg->tail : 0;
+ int err;
+
+ while (content_type == 0) {
+ if (offset < prot->prepend_size)
+ return -EBADMSG;
+ err = skb_copy_bits(skb, rxm->offset + offset,
+ &content_type, 1);
+ if (err)
+ return err;
+ if (content_type)
+ break;
+ sub++;
+ offset--;
+ }
+ tlm->control = content_type;
+ }
+ return sub;
+}
+
+static void tls_decrypt_done(crypto_completion_data_t *data, int err)
+{
+ struct aead_request *aead_req = crypto_get_completion_data(data);
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct scatterlist *sgout = aead_req->dst;
+ struct scatterlist *sgin = aead_req->src;
+ struct tls_sw_context_rx *ctx;
+ struct tls_decrypt_ctx *dctx;
+ struct tls_context *tls_ctx;
+ struct scatterlist *sg;
+ unsigned int pages;
+ struct sock *sk;
+ int aead_size;
+
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
+ aead_size = ALIGN(aead_size, __alignof__(*dctx));
+ dctx = (void *)((u8 *)aead_req + aead_size);
+
+ sk = dctx->sk;
+ tls_ctx = tls_get_ctx(sk);
+ ctx = tls_sw_ctx_rx(tls_ctx);
+
+ /* Propagate if there was an err */
+ if (err) {
+ if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
+ ctx->async_wait.err = err;
+ tls_err_abort(sk, err);
+ }
+
+ /* Free the destination pages if skb was not decrypted in place */
+ if (sgout != sgin) {
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
+ }
+
+ kfree(aead_req);
+
+ spin_lock_bh(&ctx->decrypt_compl_lock);
+ if (!atomic_dec_return(&ctx->decrypt_pending))
+ complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
+}
+
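+/* Set up the AEAD request for one record and submit it. In async mode the
+ * request completes via tls_decrypt_done() and 0 is returned immediately;
+ * otherwise we wait for the crypto layer and return its result.
+ */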
+static int tls_do_decryption(struct sock *sk,
+ struct scatterlist *sgin,
+ struct scatterlist *sgout,
+ char *iv_recv,
+ size_t data_len,
+ struct aead_request *aead_req,
+ struct tls_decrypt_arg *darg)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ int ret;
+
+ aead_request_set_tfm(aead_req, ctx->aead_recv);
+ aead_request_set_ad(aead_req, prot->aad_size);
+ aead_request_set_crypt(aead_req, sgin, sgout,
+ data_len + prot->tag_size,
+ (u8 *)iv_recv);
+
+ if (darg->async) {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_decrypt_done, aead_req);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->async_wait);
+ }
+
+ ret = crypto_aead_decrypt(aead_req);
+ if (ret == -EINPROGRESS) {
+ if (darg->async)
+ return 0;
+
+ ret = crypto_wait_req(ret, &ctx->async_wait);
+ }
+ darg->async = false;
+
+ return ret;
+}
+
+static void tls_trim_both_msgs(struct sock *sk, int target_size)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+
+ sk_msg_trim(sk, &rec->msg_plaintext, target_size);
+ if (target_size > 0)
+ target_size += prot->overhead_size;
+ sk_msg_trim(sk, &rec->msg_encrypted, target_size);
+}
+
+static int tls_alloc_encrypted_msg(struct sock *sk, int len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
+
+ return sk_msg_alloc(sk, msg_en, len, 0);
+}
+
+static int tls_clone_plaintext_msg(struct sock *sk, int required)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_pl = &rec->msg_plaintext;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
+ int skip, len;
+
+ /* We add page references worth len bytes from the encrypted sg
+ * at the end of the plaintext sg. The caller guarantees that
+ * msg_en has enough room for them.
+ */
+ len = required - msg_pl->sg.size;
+
+ /* Skip initial bytes in msg_en's data to be able to use
+ * same offset of both plain and encrypted data.
+ */
+ skip = prot->prepend_size + msg_pl->sg.size;
+
+ return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
+}
+
+static struct tls_rec *tls_get_rec(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct sk_msg *msg_pl, *msg_en;
+ struct tls_rec *rec;
+ int mem_size;
+
+ mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
+
+ rec = kzalloc(mem_size, sk->sk_allocation);
+ if (!rec)
+ return NULL;
+
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ sk_msg_init(msg_pl);
+ sk_msg_init(msg_en);
+
+ sg_init_table(rec->sg_aead_in, 2);
+ sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
+ sg_unmark_end(&rec->sg_aead_in[1]);
+
+ sg_init_table(rec->sg_aead_out, 2);
+ sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
+ sg_unmark_end(&rec->sg_aead_out[1]);
+
+ rec->sk = sk;
+
+ return rec;
+}
+
+static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
+{
+ sk_msg_free(sk, &rec->msg_encrypted);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+}
+
+static void tls_free_open_rec(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+
+ if (rec) {
+ tls_free_rec(sk, rec);
+ ctx->open_rec = NULL;
+ }
+}
+
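+/* Push any partially transmitted record first, then transmit every record
+ * on tx_list whose encryption has completed (tx_ready), freeing each one
+ * as it goes out. Stops at the first record that is not yet ready.
+ */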
+int tls_tx_records(struct sock *sk, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec, *tmp;
+ struct sk_msg *msg_en;
+ int tx_flags, rc = 0;
+
+ if (tls_is_partially_sent_record(tls_ctx)) {
+ rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+
+ if (flags == -1)
+ tx_flags = rec->tx_flags;
+ else
+ tx_flags = flags;
+
+ rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
+ if (rc)
+ goto tx_err;
+
+ /* Full record has been transmitted.
+ * Remove the head of tx_list
+ */
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
+
+ /* Tx all ready records */
+ list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
+ if (READ_ONCE(rec->tx_ready)) {
+ if (flags == -1)
+ tx_flags = rec->tx_flags;
+ else
+ tx_flags = flags;
+
+ msg_en = &rec->msg_encrypted;
+ rc = tls_push_sg(sk, tls_ctx,
+ &msg_en->sg.data[msg_en->sg.curr],
+ 0, tx_flags);
+ if (rc)
+ goto tx_err;
+
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ } else {
+ break;
+ }
+ }
+
+tx_err:
+ if (rc < 0 && rc != -EAGAIN)
+ tls_err_abort(sk, -EBADMSG);
+
+ return rc;
+}
+
+static void tls_encrypt_done(crypto_completion_data_t *data, int err)
+{
+ struct aead_request *aead_req = crypto_get_completion_data(data);
+ struct tls_sw_context_tx *ctx;
+ struct tls_context *tls_ctx;
+ struct tls_prot_info *prot;
+ struct scatterlist *sge;
+ struct sk_msg *msg_en;
+ struct tls_rec *rec;
+ bool ready = false;
+ struct sock *sk;
+ int pending;
+
+ rec = container_of(aead_req, struct tls_rec, aead_req);
+ msg_en = &rec->msg_encrypted;
+
+ sk = rec->sk;
+ tls_ctx = tls_get_ctx(sk);
+ prot = &tls_ctx->prot_info;
+ ctx = tls_sw_ctx_tx(tls_ctx);
+
+ sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+ sge->offset -= prot->prepend_size;
+ sge->length += prot->prepend_size;
+
+ /* Check if error is previously set on socket */
+ if (err || sk->sk_err) {
+ rec = NULL;
+
+ /* If err is already set on socket, return the same code */
+ if (sk->sk_err) {
+ ctx->async_wait.err = -sk->sk_err;
+ } else {
+ ctx->async_wait.err = err;
+ tls_err_abort(sk, err);
+ }
+ }
+
+ if (rec) {
+ struct tls_rec *first_rec;
+
+ /* Mark the record as ready for transmission */
+ smp_store_mb(rec->tx_ready, true);
+
+ /* If received record is at head of tx_list, schedule tx */
+ first_rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+ if (rec == first_rec)
+ ready = true;
+ }
+
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ pending = atomic_dec_return(&ctx->encrypt_pending);
+
+ if (!pending && ctx->async_notify)
+ complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+
+ if (!ready)
+ return;
+
+ /* Schedule the transmission */
+ if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ schedule_delayed_work(&ctx->tx_work.work, 1);
+}
+
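+/* Build the per-record nonce, queue the open record on tx_list and kick off
+ * (possibly asynchronous) AEAD encryption. On success or -EINPROGRESS the
+ * record is unhooked from the context and the TX sequence number advances.
+ */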
+static int tls_do_encryption(struct sock *sk,
+ struct tls_context *tls_ctx,
+ struct tls_sw_context_tx *ctx,
+ struct aead_request *aead_req,
+ size_t data_len, u32 start)
+{
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
+ struct scatterlist *sge = sk_msg_elem(msg_en, start);
+ int rc, iv_offset = 0;
+
+ /* For CCM based ciphers, first byte of IV is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ }
+
+ memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
+ prot->iv_size + prot->salt_size);
+
+ tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
+ tls_ctx->tx.rec_seq);
+
+ sge->offset += prot->prepend_size;
+ sge->length -= prot->prepend_size;
+
+ msg_en->sg.curr = start;
+
+ aead_request_set_tfm(aead_req, ctx->aead_send);
+ aead_request_set_ad(aead_req, prot->aad_size);
+ aead_request_set_crypt(aead_req, rec->sg_aead_in,
+ rec->sg_aead_out,
+ data_len, rec->iv_data);
+
+ aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_encrypt_done, aead_req);
+
+ /* Add the record in tx_list */
+ list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+ atomic_inc(&ctx->encrypt_pending);
+
+ rc = crypto_aead_encrypt(aead_req);
+ if (!rc || rc != -EINPROGRESS) {
+ atomic_dec(&ctx->encrypt_pending);
+ sge->offset -= prot->prepend_size;
+ sge->length += prot->prepend_size;
+ }
+
+ if (!rc) {
+ WRITE_ONCE(rec->tx_ready, true);
+ } else if (rc != -EINPROGRESS) {
+ list_del(&rec->list);
+ return rc;
+ }
+
+ /* Unhook the record from the context unless encryption failed */
+ ctx->open_rec = NULL;
+ tls_advance_record_sn(sk, prot, &tls_ctx->tx);
+ return rc;
+}
+
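+/* Split the open record in two: the first msg_opl->apply_bytes bytes of
+ * plaintext stay in 'from', the remainder is moved into a newly allocated
+ * record returned through '*to'. *orig_end remembers the original sg end
+ * so the records can be merged back by tls_merge_open_record() if needed.
+ */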
+static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
+ struct tls_rec **to, struct sk_msg *msg_opl,
+ struct sk_msg *msg_oen, u32 split_point,
+ u32 tx_overhead_size, u32 *orig_end)
+{
+ u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
+ struct scatterlist *sge, *osge, *nsge;
+ u32 orig_size = msg_opl->sg.size;
+ struct scatterlist tmp = { };
+ struct sk_msg *msg_npl;
+ struct tls_rec *new;
+ int ret;
+
+ new = tls_get_rec(sk);
+ if (!new)
+ return -ENOMEM;
+ ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
+ tx_overhead_size, 0);
+ if (ret < 0) {
+ tls_free_rec(sk, new);
+ return ret;
+ }
+
+ *orig_end = msg_opl->sg.end;
+ i = msg_opl->sg.start;
+ sge = sk_msg_elem(msg_opl, i);
+ while (apply && sge->length) {
+ if (sge->length > apply) {
+ u32 len = sge->length - apply;
+
+ get_page(sg_page(sge));
+ sg_set_page(&tmp, sg_page(sge), len,
+ sge->offset + apply);
+ sge->length = apply;
+ bytes += apply;
+ apply = 0;
+ } else {
+ apply -= sge->length;
+ bytes += sge->length;
+ }
+
+ sk_msg_iter_var_next(i);
+ if (i == msg_opl->sg.end)
+ break;
+ sge = sk_msg_elem(msg_opl, i);
+ }
+
+ msg_opl->sg.end = i;
+ msg_opl->sg.curr = i;
+ msg_opl->sg.copybreak = 0;
+ msg_opl->apply_bytes = 0;
+ msg_opl->sg.size = bytes;
+
+ msg_npl = &new->msg_plaintext;
+ msg_npl->apply_bytes = apply;
+ msg_npl->sg.size = orig_size - bytes;
+
+ j = msg_npl->sg.start;
+ nsge = sk_msg_elem(msg_npl, j);
+ if (tmp.length) {
+ memcpy(nsge, &tmp, sizeof(*nsge));
+ sk_msg_iter_var_next(j);
+ nsge = sk_msg_elem(msg_npl, j);
+ }
+
+ osge = sk_msg_elem(msg_opl, i);
+ while (osge->length) {
+ memcpy(nsge, osge, sizeof(*nsge));
+ sg_unmark_end(nsge);
+ sk_msg_iter_var_next(i);
+ sk_msg_iter_var_next(j);
+ if (i == *orig_end)
+ break;
+ osge = sk_msg_elem(msg_opl, i);
+ nsge = sk_msg_elem(msg_npl, j);
+ }
+
+ msg_npl->sg.end = j;
+ msg_npl->sg.curr = j;
+ msg_npl->sg.copybreak = 0;
+
+ *to = new;
+ return 0;
+}
+
+static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
+ struct tls_rec *from, u32 orig_end)
+{
+ struct sk_msg *msg_npl = &from->msg_plaintext;
+ struct sk_msg *msg_opl = &to->msg_plaintext;
+ struct scatterlist *osge, *nsge;
+ u32 i, j;
+
+ i = msg_opl->sg.end;
+ sk_msg_iter_var_prev(i);
+ j = msg_npl->sg.start;
+
+ osge = sk_msg_elem(msg_opl, i);
+ nsge = sk_msg_elem(msg_npl, j);
+
+ if (sg_page(osge) == sg_page(nsge) &&
+ osge->offset + osge->length == nsge->offset) {
+ osge->length += nsge->length;
+ put_page(sg_page(nsge));
+ }
+
+ msg_opl->sg.end = orig_end;
+ msg_opl->sg.curr = orig_end;
+ msg_opl->sg.copybreak = 0;
+ msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
+ msg_opl->sg.size += msg_npl->sg.size;
+
+ sk_msg_free(sk, &to->msg_encrypted);
+ sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
+
+ kfree(from);
+}
+
+static int tls_push_record(struct sock *sk, int flags,
+ unsigned char record_type)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
+ u32 i, split_point, orig_end;
+ struct sk_msg *msg_pl, *msg_en;
+ struct aead_request *req;
+ bool split;
+ int rc;
+
+ if (!rec)
+ return 0;
+
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ split_point = msg_pl->apply_bytes;
+ split = split_point && split_point < msg_pl->sg.size;
+ if (unlikely((!split &&
+ msg_pl->sg.size +
+ prot->overhead_size > msg_en->sg.size) ||
+ (split &&
+ split_point +
+ prot->overhead_size > msg_en->sg.size))) {
+ split = true;
+ split_point = msg_en->sg.size;
+ }
+ if (split) {
+ rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
+ split_point, prot->overhead_size,
+ &orig_end);
+ if (rc < 0)
+ return rc;
+ /* This can happen if the tls_split_open_record() call above
+ * allocated a single large encryption buffer instead of two
+ * smaller ones. In this case adjust the pointers and continue
+ * without splitting.
+ */
+ if (!msg_pl->sg.size) {
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+ split = false;
+ }
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+ prot->overhead_size);
+ }
+
+ rec->tx_flags = flags;
+ req = &rec->aead_req;
+
+ i = msg_pl->sg.end;
+ sk_msg_iter_var_prev(i);
+
+ rec->content_type = record_type;
+ if (prot->version == TLS_1_3_VERSION) {
+ /* Add content type to end of message. No padding added */
+ sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
+ sg_mark_end(&rec->sg_content_type);
+ sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
+ &rec->sg_content_type);
+ } else {
+ sg_mark_end(sk_msg_elem(msg_pl, i));
+ }
+
+ if (msg_pl->sg.end < msg_pl->sg.start) {
+ sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
+ MAX_SKB_FRAGS - msg_pl->sg.start + 1,
+ msg_pl->sg.data);
+ }
+
+ i = msg_pl->sg.start;
+ sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
+
+ i = msg_en->sg.end;
+ sk_msg_iter_var_prev(i);
+ sg_mark_end(sk_msg_elem(msg_en, i));
+
+ i = msg_en->sg.start;
+ sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
+
+ tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
+ tls_ctx->tx.rec_seq, record_type, prot);
+
+ tls_fill_prepend(tls_ctx,
+ page_address(sg_page(&msg_en->sg.data[i])) +
+ msg_en->sg.data[i].offset,
+ msg_pl->sg.size + prot->tail_size,
+ record_type);
+
+ tls_ctx->pending_open_record_frags = false;
+
+ rc = tls_do_encryption(sk, tls_ctx, ctx, req,
+ msg_pl->sg.size + prot->tail_size, i);
+ if (rc < 0) {
+ if (rc != -EINPROGRESS) {
+ tls_err_abort(sk, -EBADMSG);
+ if (split) {
+ tls_ctx->pending_open_record_frags = true;
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+ }
+ }
+ ctx->async_capable = 1;
+ return rc;
+ } else if (split) {
+ msg_pl = &tmp->msg_plaintext;
+ msg_en = &tmp->msg_encrypted;
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
+ tls_ctx->pending_open_record_frags = true;
+ ctx->open_rec = tmp;
+ }
+
+ return tls_tx_records(sk, flags);
+}
+
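+/* Run the attached sockmap verdict program (if any) on the plaintext of the
+ * open record and act on its decision: __SK_PASS pushes the record out as
+ * usual, __SK_REDIRECT hands the data to another socket and __SK_DROP frees
+ * it. Without a psock or policy the record is simply pushed.
+ */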
+static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ bool full_record, u8 record_type,
+ ssize_t *copied, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct sk_msg msg_redir = { };
+ struct sk_psock *psock;
+ struct sock *sk_redir;
+ struct tls_rec *rec;
+ bool enospc, policy, redir_ingress;
+ int err = 0, send;
+ u32 delta = 0;
+
+ policy = !(flags & MSG_SENDPAGE_NOPOLICY);
+ psock = sk_psock_get(sk);
+ if (!psock || !policy) {
+ err = tls_push_record(sk, flags, record_type);
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ *copied -= sk_msg_free(sk, msg);
+ tls_free_open_rec(sk);
+ err = -sk->sk_err;
+ }
+ if (psock)
+ sk_psock_put(sk, psock);
+ return err;
+ }
+more_data:
+ enospc = sk_msg_full(msg);
+ if (psock->eval == __SK_NONE) {
+ delta = msg->sg.size;
+ psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+ delta -= msg->sg.size;
+ }
+ if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
+ !enospc && !full_record) {
+ err = -ENOSPC;
+ goto out_err;
+ }
+ msg->cork_bytes = 0;
+ send = msg->sg.size;
+ if (msg->apply_bytes && msg->apply_bytes < send)
+ send = msg->apply_bytes;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+ err = tls_push_record(sk, flags, record_type);
+ if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
+ *copied -= sk_msg_free(sk, msg);
+ tls_free_open_rec(sk);
+ err = -sk->sk_err;
+ goto out_err;
+ }
+ break;
+ case __SK_REDIRECT:
+ redir_ingress = psock->redir_ingress;
+ sk_redir = psock->sk_redir;
+ memcpy(&msg_redir, msg, sizeof(*msg));
+ if (msg->apply_bytes < send)
+ msg->apply_bytes = 0;
+ else
+ msg->apply_bytes -= send;
+ sk_msg_return_zero(sk, msg, send);
+ msg->sg.size -= send;
+ release_sock(sk);
+ err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
+ &msg_redir, send, flags);
+ lock_sock(sk);
+ if (err < 0) {
+ *copied -= sk_msg_free_nocharge(sk, &msg_redir);
+ msg->sg.size = 0;
+ }
+ if (msg->sg.size == 0)
+ tls_free_open_rec(sk);
+ break;
+ case __SK_DROP:
+ default:
+ sk_msg_free_partial(sk, msg, send);
+ if (msg->apply_bytes < send)
+ msg->apply_bytes = 0;
+ else
+ msg->apply_bytes -= send;
+ if (msg->sg.size == 0)
+ tls_free_open_rec(sk);
+ *copied -= (send + delta);
+ err = -EACCES;
+ }
+
+ if (likely(!err)) {
+ bool reset_eval = !ctx->open_rec;
+
+ rec = ctx->open_rec;
+ if (rec) {
+ msg = &rec->msg_plaintext;
+ if (!msg->apply_bytes)
+ reset_eval = true;
+ }
+ if (reset_eval) {
+ psock->eval = __SK_NONE;
+ if (psock->sk_redir) {
+ sock_put(psock->sk_redir);
+ psock->sk_redir = NULL;
+ }
+ }
+ if (rec)
+ goto more_data;
+ }
+ out_err:
+ sk_psock_put(sk, psock);
+ return err;
+}
+
+static int tls_sw_push_pending_record(struct sock *sk, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_pl;
+ size_t copied;
+
+ if (!rec)
+ return 0;
+
+ msg_pl = &rec->msg_plaintext;
+ copied = msg_pl->sg.size;
+ if (!copied)
+ return 0;
+
+ return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
+ &copied, flags);
+}
+
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ bool async_capable = ctx->async_capable;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool eor = !(msg->msg_flags & MSG_MORE);
+ size_t try_to_copy;
+ ssize_t copied = 0;
+ struct sk_msg *msg_pl, *msg_en;
+ struct tls_rec *rec;
+ int required_size;
+ int num_async = 0;
+ bool full_record;
+ int record_room;
+ int num_zc = 0;
+ int orig_size;
+ int ret = 0;
+ int pending;
+
+ if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_CMSG_COMPAT))
+ return -EOPNOTSUPP;
+
+ ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+ if (ret)
+ return ret;
+ lock_sock(sk);
+
+ if (unlikely(msg->msg_controllen)) {
+ ret = tls_process_cmsg(sk, msg, &record_type);
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret != -EAGAIN)
+ goto send_end;
+ }
+ }
+
+ while (msg_data_left(msg)) {
+ if (sk->sk_err) {
+ ret = -sk->sk_err;
+ goto send_end;
+ }
+
+ if (ctx->open_rec)
+ rec = ctx->open_rec;
+ else
+ rec = ctx->open_rec = tls_get_rec(sk);
+ if (!rec) {
+ ret = -ENOMEM;
+ goto send_end;
+ }
+
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ orig_size = msg_pl->sg.size;
+ full_record = false;
+ try_to_copy = msg_data_left(msg);
+ record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
+ if (try_to_copy >= record_room) {
+ try_to_copy = record_room;
+ full_record = true;
+ }
+
+ required_size = msg_pl->sg.size + try_to_copy +
+ prot->overhead_size;
+
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+
+alloc_encrypted:
+ ret = tls_alloc_encrypted_msg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto wait_for_memory;
+
+ /* Adjust try_to_copy according to the amount that was
+ * actually allocated. The difference is due
+ * to max sg elements limit
+ */
+ try_to_copy -= required_size - msg_en->sg.size;
+ full_record = true;
+ }
+
+ if (!is_kvec && (full_record || eor) && !async_capable) {
+ u32 first = msg_pl->sg.end;
+
+ ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
+ msg_pl, try_to_copy);
+ if (ret)
+ goto fallback_to_reg_send;
+
+ num_zc++;
+ copied += try_to_copy;
+
+ sk_msg_sg_copy_set(msg_pl, first);
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied,
+ msg->msg_flags);
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
+ goto wait_for_memory;
+ else if (ctx->open_rec && ret == -ENOSPC)
+ goto rollback_iter;
+ else if (ret != -EAGAIN)
+ goto send_end;
+ }
+ continue;
+rollback_iter:
+ copied -= try_to_copy;
+ sk_msg_sg_copy_clear(msg_pl, first);
+ iov_iter_revert(&msg->msg_iter,
+ msg_pl->sg.size - orig_size);
+fallback_to_reg_send:
+ sk_msg_trim(sk, msg_pl, orig_size);
+ }
+
+ required_size = msg_pl->sg.size + try_to_copy;
+
+ ret = tls_clone_plaintext_msg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto send_end;
+
+ /* Adjust try_to_copy according to the amount that was
+ * actually allocated. The difference is due
+ * to max sg elements limit
+ */
+ try_to_copy -= required_size - msg_pl->sg.size;
+ full_record = true;
+ sk_msg_trim(sk, msg_en,
+ msg_pl->sg.size + prot->overhead_size);
+ }
+
+ if (try_to_copy) {
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
+ msg_pl, try_to_copy);
+ if (ret < 0)
+ goto trim_sgl;
+ }
+
+ /* Only mark the open record as having pending frags after a
+ * successful copy, otherwise we would trim the sg but not reset
+ * the open record frags.
+ */
+ tls_ctx->pending_open_record_frags = true;
+ copied += try_to_copy;
+ if (full_record || eor) {
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied,
+ msg->msg_flags);
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
+ goto wait_for_memory;
+ else if (ret != -EAGAIN) {
+ if (ret == -ENOSPC)
+ ret = 0;
+ goto send_end;
+ }
+ }
+ }
+
+ continue;
+
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret) {
+trim_sgl:
+ if (ctx->open_rec)
+ tls_trim_both_msgs(sk, orig_size);
+ goto send_end;
+ }
+
+ if (ctx->open_rec && msg_en->sg.size < required_size)
+ goto alloc_encrypted;
+ }
+
+ if (!num_async) {
+ goto send_end;
+ } else if (num_zc) {
+ /* Wait for pending encryptions to get completed */
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ ctx->async_notify = true;
+
+ pending = atomic_read(&ctx->encrypt_pending);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+ if (pending)
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ else
+ reinit_completion(&ctx->async_wait.completion);
+
+ /* There can be no concurrent accesses, since we have no
+ * pending encrypt operations
+ */
+ WRITE_ONCE(ctx->async_notify, false);
+
+ if (ctx->async_wait.err) {
+ ret = ctx->async_wait.err;
+ copied = 0;
+ }
+ }
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
+ }
+
+send_end:
+ ret = sk_stream_error(sk, msg->msg_flags, ret);
+
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ return copied > 0 ? copied : ret;
+}
+
+static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ struct sk_msg *msg_pl;
+ struct tls_rec *rec;
+ int num_async = 0;
+ ssize_t copied = 0;
+ bool full_record;
+ int record_room;
+ int ret = 0;
+ bool eor;
+
+ eor = !(flags & MSG_SENDPAGE_NOTLAST);
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ /* Call the sk_stream functions to manage the sndbuf mem. */
+ while (size > 0) {
+ size_t copy, required_size;
+
+ if (sk->sk_err) {
+ ret = -sk->sk_err;
+ goto sendpage_end;
+ }
+
+ if (ctx->open_rec)
+ rec = ctx->open_rec;
+ else
+ rec = ctx->open_rec = tls_get_rec(sk);
+ if (!rec) {
+ ret = -ENOMEM;
+ goto sendpage_end;
+ }
+
+ msg_pl = &rec->msg_plaintext;
+
+ full_record = false;
+ record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
+ copy = size;
+ if (copy >= record_room) {
+ copy = record_room;
+ full_record = true;
+ }
+
+ required_size = msg_pl->sg.size + copy + prot->overhead_size;
+
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+alloc_payload:
+ ret = tls_alloc_encrypted_msg(sk, required_size);
+ if (ret) {
+ if (ret != -ENOSPC)
+ goto wait_for_memory;
+
+ /* Adjust copy according to the amount that was
+ * actually allocated. The difference is due
+ * to max sg elements limit
+ */
+ copy -= required_size - msg_pl->sg.size;
+ full_record = true;
+ }
+
+ sk_msg_page_add(msg_pl, page, copy, offset);
+ msg_pl->sg.copybreak = 0;
+ msg_pl->sg.curr = msg_pl->sg.end;
+ sk_mem_charge(sk, copy);
+
+ offset += copy;
+ size -= copy;
+ copied += copy;
+
+ tls_ctx->pending_open_record_frags = true;
+ if (full_record || eor || sk_msg_full(msg_pl)) {
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied, flags);
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
+ goto wait_for_memory;
+ else if (ret != -EAGAIN) {
+ if (ret == -ENOSPC)
+ ret = 0;
+ goto sendpage_end;
+ }
+ }
+ }
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret) {
+ if (ctx->open_rec)
+ tls_trim_both_msgs(sk, msg_pl->sg.size);
+ goto sendpage_end;
+ }
+
+ if (ctx->open_rec)
+ goto alloc_payload;
+ }
+
+ if (num_async) {
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, flags);
+ }
+ }
+sendpage_end:
+ ret = sk_stream_error(sk, flags, ret);
+ return copied > 0 ? copied : ret;
+}
+
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+ MSG_NO_SHARED_FRAGS))
+ return -EOPNOTSUPP;
+
+ return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ int ret;
+
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ return -EOPNOTSUPP;
+
+ ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+ if (ret)
+ return ret;
+ lock_sock(sk);
+ ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ return ret;
+}
+
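+/* Wait for the strparser to have a complete record ready. Returns 1 once a
+ * record is loaded, 0 if the psock queue has data or the socket is shut
+ * down/done, and a negative error on socket error, timeout or signal.
+ */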
+static int
+tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
+ bool released)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+
+ while (!tls_strp_msg_ready(ctx)) {
+ if (!sk_psock_queue_empty(psock))
+ return 0;
+
+ if (sk->sk_err)
+ return sock_error(sk);
+
+ if (ret < 0)
+ return ret;
+
+ if (!skb_queue_empty(&sk->sk_receive_queue)) {
+ tls_strp_check_rcv(&ctx->strp);
+ if (tls_strp_msg_ready(ctx))
+ break;
+ }
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 0;
+
+ if (sock_flag(sk, SOCK_DONE))
+ return 0;
+
+ if (!timeo)
+ return -EAGAIN;
+
+ released = true;
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = sk_wait_event(sk, &timeo,
+ tls_strp_msg_ready(ctx) ||
+ !sk_psock_queue_empty(psock),
+ &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ /* Handle signals */
+ if (signal_pending(current))
+ return sock_intr_errno(timeo);
+ }
+
+ tls_strp_msg_load(&ctx->strp, released);
+
+ return 1;
+}
+
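+/* Pin the user pages backing 'from' and map up to 'length' bytes of them
+ * into the scatterlist 'to', so the record can be decrypted directly into
+ * the caller's buffer. On failure the iterator is reverted.
+ */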
+static int tls_setup_from_iter(struct iov_iter *from,
+ int length, int *pages_used,
+ struct scatterlist *to,
+ int to_max_pages)
+{
+ int rc = 0, i = 0, num_elem = *pages_used, maxpages;
+ struct page *pages[MAX_SKB_FRAGS];
+ unsigned int size = 0;
+ ssize_t copied, use;
+ size_t offset;
+
+ while (length > 0) {
+ i = 0;
+ maxpages = to_max_pages - num_elem;
+ if (maxpages == 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+ copied = iov_iter_get_pages2(from, pages,
+ length,
+ maxpages, &offset);
+ if (copied <= 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ length -= copied;
+ size += copied;
+ while (copied) {
+ use = min_t(int, copied, PAGE_SIZE - offset);
+
+ sg_set_page(&to[num_elem],
+ pages[i], use, offset);
+ sg_unmark_end(&to[num_elem]);
+ /* We do not uncharge memory from this API */
+
+ offset = 0;
+ copied -= use;
+
+ i++;
+ num_elem++;
+ }
+ }
+ /* Mark the end in the last sg entry if newly added */
+ if (num_elem > *pages_used)
+ sg_mark_end(&to[num_elem - 1]);
+out:
+ if (rc)
+ iov_iter_revert(from, size);
+ *pages_used = num_elem;
+
+ return rc;
+}
+
+static struct sk_buff *
+tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
+ unsigned int full_len)
+{
+ struct strp_msg *clr_rxm;
+ struct sk_buff *clr_skb;
+ int err;
+
+ clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
+ &err, sk->sk_allocation);
+ if (!clr_skb)
+ return NULL;
+
+ skb_copy_header(clr_skb, skb);
+ clr_skb->len = full_len;
+ clr_skb->data_len = full_len;
+
+ clr_rxm = strp_msg(clr_skb);
+ clr_rxm->offset = 0;
+
+ return clr_skb;
+}
+
+/* Decrypt handlers
+ *
+ * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
+ * They must transform the darg in/out argument as follows:
+ * | Input | Output
+ * -------------------------------------------------------------------
+ * zc | Zero-copy decrypt allowed | Zero-copy performed
+ * async | Async decrypt allowed | Async crypto used / in progress
+ * skb | * | Output skb
+ *
+ * If ZC decryption was performed darg.skb will point to the input skb.
+ */
+
+/* This function decrypts the input skb into either out_iov, out_sg, or a
+ * freshly allocated cleartext skb. The input parameter 'darg->zc' indicates
+ * whether zero-copy mode should be tried. In zero-copy mode either out_iov
+ * or out_sg must be non-NULL; if both are NULL the decryption happens into
+ * a new skb instead, i.e. zero-copy gets disabled and 'darg->zc' is updated.
+ */
+static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
+ struct scatterlist *out_sg,
+ struct tls_decrypt_arg *darg)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ int n_sgin, n_sgout, aead_size, err, pages = 0;
+ struct sk_buff *skb = tls_strp_msg(ctx);
+ const struct strp_msg *rxm = strp_msg(skb);
+ const struct tls_msg *tlm = tls_msg(skb);
+ struct aead_request *aead_req;
+ struct scatterlist *sgin = NULL;
+ struct scatterlist *sgout = NULL;
+ const int data_len = rxm->full_len - prot->overhead_size;
+ int tail_pages = !!prot->tail_size;
+ struct tls_decrypt_ctx *dctx;
+ struct sk_buff *clear_skb;
+ int iv_offset = 0;
+ u8 *mem;
+
+ n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
+ if (n_sgin < 1)
+ return n_sgin ?: -EBADMSG;
+
+ if (darg->zc && (out_iov || out_sg)) {
+ clear_skb = NULL;
+
+ if (out_iov)
+ n_sgout = 1 + tail_pages +
+ iov_iter_npages_cap(out_iov, INT_MAX, data_len);
+ else
+ n_sgout = sg_nents(out_sg);
+ } else {
+ darg->zc = false;
+
+ clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
+ if (!clear_skb)
+ return -ENOMEM;
+
+ n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
+ }
+
+ /* Increment to accommodate AAD */
+ n_sgin = n_sgin + 1;
+
+ /* Allocate a single block of memory which contains
+ * aead_req || tls_decrypt_ctx.
+ * Both structs are variable length.
+ */
+ aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
+ aead_size = ALIGN(aead_size, __alignof__(*dctx));
+ mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
+ sk->sk_allocation);
+ if (!mem) {
+ err = -ENOMEM;
+ goto exit_free_skb;
+ }
+
+ /* Segment the allocated memory */
+ aead_req = (struct aead_request *)mem;
+ dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
+ dctx->sk = sk;
+ sgin = &dctx->sg[0];
+ sgout = &dctx->sg[n_sgin];
+
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ }
+
+ /* Prepare IV */
+ if (prot->version == TLS_1_3_VERSION ||
+ prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
+ memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
+ prot->iv_size + prot->salt_size);
+ } else {
+ err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
+ &dctx->iv[iv_offset] + prot->salt_size,
+ prot->iv_size);
+ if (err < 0)
+ goto exit_free;
+ memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
+ }
+ tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
+
+ /* Prepare AAD */
+ tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
+ prot->tail_size,
+ tls_ctx->rx.rec_seq, tlm->control, prot);
+
+ /* Prepare sgin */
+ sg_init_table(sgin, n_sgin);
+ sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
+ err = skb_to_sgvec(skb, &sgin[1],
+ rxm->offset + prot->prepend_size,
+ rxm->full_len - prot->prepend_size);
+ if (err < 0)
+ goto exit_free;
+
+ if (clear_skb) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+
+ err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
+ data_len + prot->tail_size);
+ if (err < 0)
+ goto exit_free;
+ } else if (out_iov) {
+ sg_init_table(sgout, n_sgout);
+ sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
+
+ err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
+ (n_sgout - 1 - tail_pages));
+ if (err < 0)
+ goto exit_free_pages;
+
+ if (prot->tail_size) {
+ sg_unmark_end(&sgout[pages]);
+ sg_set_buf(&sgout[pages + 1], &dctx->tail,
+ prot->tail_size);
+ sg_mark_end(&sgout[pages + 1]);
+ }
+ } else if (out_sg) {
+ memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
+ }
+
+ /* Prepare and submit AEAD request */
+ err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
+ data_len + prot->tail_size, aead_req, darg);
+ if (err)
+ goto exit_free_pages;
+
+ darg->skb = clear_skb ?: tls_strp_msg(ctx);
+ clear_skb = NULL;
+
+ if (unlikely(darg->async)) {
+ err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
+ if (err)
+ __skb_queue_tail(&ctx->async_hold, darg->skb);
+ return err;
+ }
+
+ if (prot->tail_size)
+ darg->tail = dctx->tail;
+
+exit_free_pages:
+ /* Release the pages in case iov was mapped to pages */
+ for (; pages > 0; pages--)
+ put_page(sg_page(&sgout[pages]));
+exit_free:
+ kfree(mem);
+exit_free_skb:
+ consume_skb(clear_skb);
+ return err;
+}
+
+static int
+tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
+ struct msghdr *msg, struct tls_decrypt_arg *darg)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct strp_msg *rxm;
+ int pad, err;
+
+ err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
+ if (err < 0) {
+ if (err == -EBADMSG)
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
+ return err;
+ }
+ /* keep going even for ->async, the code below is TLS 1.3 */
+
+ /* If opportunistic TLS 1.3 ZC failed retry without ZC */
+ if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
+ darg->tail != TLS_RECORD_TYPE_DATA)) {
+ darg->zc = false;
+ if (!darg->tail)
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
+ return tls_decrypt_sw(sk, tls_ctx, msg, darg);
+ }
+
+ pad = tls_padding_length(prot, darg->skb, darg);
+ if (pad < 0) {
+ if (darg->skb != tls_strp_msg(ctx))
+ consume_skb(darg->skb);
+ return pad;
+ }
+
+ rxm = strp_msg(darg->skb);
+ rxm->full_len -= pad;
+
+ return 0;
+}
+
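+/* Check whether the record was already decrypted by the device (TLS_HW Rx
+ * offload). Returns 0 to fall back to software decryption, 1 if the device
+ * handled it (stripping padding and producing darg->skb, or copying straight
+ * into msg in the zero-copy case), or a negative error.
+ */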
+static int
+tls_decrypt_device(struct sock *sk, struct msghdr *msg,
+ struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct strp_msg *rxm;
+ int pad, err;
+
+ if (tls_ctx->rx_conf != TLS_HW)
+ return 0;
+
+ err = tls_device_decrypted(sk, tls_ctx);
+ if (err <= 0)
+ return err;
+
+ pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
+ if (pad < 0)
+ return pad;
+
+ darg->async = false;
+ darg->skb = tls_strp_msg(ctx);
+ /* ->zc downgrade check, in case TLS 1.3 gets here */
+ darg->zc &= !(prot->version == TLS_1_3_VERSION &&
+ tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
+
+ rxm = strp_msg(darg->skb);
+ rxm->full_len -= pad;
+
+ if (!darg->zc) {
+ /* Non-ZC case needs a real skb */
+ darg->skb = tls_strp_msg_detach(ctx);
+ if (!darg->skb)
+ return -ENOMEM;
+ } else {
+ unsigned int off, len;
+
+ /* In ZC case nobody cares about the output skb.
+ * Just copy the data here. Note the skb is not fully trimmed.
+ */
+ off = rxm->offset + prot->prepend_size;
+ len = rxm->full_len - prot->overhead_size;
+
+ err = skb_copy_datagram_msg(darg->skb, off, msg, len);
+ if (err)
+ return err;
+ }
+ return 1;
+}
+
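+/* Decrypt a single record, trying device offload first and software
+ * otherwise. On success darg->skb holds the cleartext, the rxm offset and
+ * length are adjusted past the TLS header/overhead and the RX record
+ * sequence number advances.
+ */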
+static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
+ struct tls_decrypt_arg *darg)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct strp_msg *rxm;
+ int err;
+
+ err = tls_decrypt_device(sk, msg, tls_ctx, darg);
+ if (!err)
+ err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
+ if (err < 0)
+ return err;
+
+ rxm = strp_msg(darg->skb);
+ rxm->offset += prot->prepend_size;
+ rxm->full_len -= prot->overhead_size;
+ tls_advance_record_sn(sk, prot, &tls_ctx->rx);
+
+ return 0;
+}
+
+int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
+{
+ struct tls_decrypt_arg darg = { .zc = true, };
+
+ return tls_decrypt_sg(sk, NULL, sgout, &darg);
+}
+
+static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
+ u8 *control)
+{
+ int err;
+
+ if (!*control) {
+ *control = tlm->control;
+ if (!*control)
+ return -EBADMSG;
+
+ err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(*control), control);
+ if (*control != TLS_RECORD_TYPE_DATA) {
+ if (err || msg->msg_flags & MSG_CTRUNC)
+ return -EIO;
+ }
+ } else if (*control != tlm->control) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
+{
+ tls_strp_msg_done(&ctx->strp);
+}
+
+/* This function traverses the rx_list in the TLS receive context and copies
+ * the already-decrypted records into the buffer provided by the caller
+ * (zero copy is not used here). Records are removed from the rx_list once
+ * they have been consumed completely, unless this is a peek.
+ */
+static int process_rx_list(struct tls_sw_context_rx *ctx,
+ struct msghdr *msg,
+ u8 *control,
+ size_t skip,
+ size_t len,
+ bool is_peek)
+{
+ struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ struct tls_msg *tlm;
+ ssize_t copied = 0;
+ int err;
+
+ while (skip && skb) {
+ struct strp_msg *rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+ goto out;
+
+ if (skip < rxm->full_len)
+ break;
+
+ skip = skip - rxm->full_len;
+ skb = skb_peek_next(skb, &ctx->rx_list);
+ }
+
+ while (len && skb) {
+ struct sk_buff *next_skb;
+ struct strp_msg *rxm = strp_msg(skb);
+ int chunk = min_t(unsigned int, rxm->full_len - skip, len);
+
+ tlm = tls_msg(skb);
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+ goto out;
+
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+ goto out;
+
+ len = len - chunk;
+ copied = copied + chunk;
+
+ /* Consume the data from the record in the non-peek case */
+ if (!is_peek) {
+ rxm->offset = rxm->offset + chunk;
+ rxm->full_len = rxm->full_len - chunk;
+
+ /* Return if there is unconsumed data in the record */
+ if (rxm->full_len - skip)
+ break;
+ }
+
+ /* The remaining skip bytes must lie within the first record in
+ * rx_list, so from the second record onwards 'skip' is 0.
+ */
+ skip = 0;
+
+ if (msg)
+ msg->msg_flags |= MSG_EOR;
+
+ next_skb = skb_peek_next(skb, &ctx->rx_list);
+
+ if (!is_peek) {
+ __skb_unlink(skb, &ctx->rx_list);
+ consume_skb(skb);
+ }
+
+ skb = next_skb;
+ }
+ err = 0;
+
+out:
+ return copied ? : err;
+}
+
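+/* During long reads, periodically let the socket backlog be processed so
+ * TCP keeps feeding the parser. The flush is skipped once enough data has
+ * already been decrypted, and while less than 128K has been copied since
+ * the last flush with more than a full record still queued in TCP.
+ */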
+static bool
+tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
+ size_t len_left, size_t decrypted, ssize_t done,
+ size_t *flushed_at)
+{
+ size_t max_rec;
+
+ if (len_left <= decrypted)
+ return false;
+
+ max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
+ if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
+ return false;
+
+ *flushed_at = done;
+ return sk_flush_backlog(sk);
+}
+
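+/* Only one reader may own the TLS Rx state at a time. If another reader is
+ * present, sleep on ctx->wq (honouring the receive timeout and signals)
+ * until it releases the reader slot, then mark ourselves present.
+ */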
+static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
+ bool nonblock)
+{
+ long timeo;
+ int ret;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+
+ while (unlikely(ctx->reader_present)) {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ ctx->reader_contended = 1;
+
+ add_wait_queue(&ctx->wq, &wait);
+ ret = sk_wait_event(sk, &timeo,
+ !READ_ONCE(ctx->reader_present), &wait);
+ remove_wait_queue(&ctx->wq, &wait);
+
+ if (timeo <= 0)
+ return -EAGAIN;
+ if (signal_pending(current))
+ return sock_intr_errno(timeo);
+ if (ret < 0)
+ return ret;
+ }
+
+ WRITE_ONCE(ctx->reader_present, 1);
+
+ return 0;
+}
+
+static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
+ bool nonblock)
+{
+ int err;
+
+ lock_sock(sk);
+ err = tls_rx_reader_acquire(sk, ctx, nonblock);
+ if (err)
+ release_sock(sk);
+ return err;
+}
+
+static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+ if (unlikely(ctx->reader_contended)) {
+ if (wq_has_sleeper(&ctx->wq))
+ wake_up(&ctx->wq);
+ else
+ ctx->reader_contended = 0;
+
+ WARN_ON_ONCE(!ctx->reader_present);
+ }
+
+ WRITE_ONCE(ctx->reader_present, 0);
+}
+
+static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
+{
+ tls_rx_reader_release(sk, ctx);
+ release_sock(sk);
+}
+
+int tls_sw_recvmsg(struct sock *sk,
+ struct msghdr *msg,
+ size_t len,
+ int flags,
+ int *addr_len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ ssize_t decrypted = 0, async_copy_bytes = 0;
+ struct sk_psock *psock;
+ unsigned char control = 0;
+ size_t flushed_at = 0;
+ struct strp_msg *rxm;
+ struct tls_msg *tlm;
+ ssize_t copied = 0;
+ bool async = false;
+ int target, err;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool is_peek = flags & MSG_PEEK;
+ bool released = true;
+ bool bpf_strp_enabled;
+ bool zc_capable;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
+
+ psock = sk_psock_get(sk);
+ err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
+ if (err < 0)
+ return err;
+ bpf_strp_enabled = sk_psock_strp_enabled(psock);
+
+ /* If crypto failed the connection is broken */
+ err = ctx->async_wait.err;
+ if (err)
+ goto end;
+
+ /* Process pending decrypted records. It must be non-zero-copy */
+ err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
+ if (err < 0)
+ goto end;
+
+ copied = err;
+ if (len <= copied)
+ goto end;
+
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ len = len - copied;
+
+ zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
+ ctx->zc_capable;
+ decrypted = 0;
+ while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
+ struct tls_decrypt_arg darg;
+ int to_decrypt, chunk;
+
+ err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
+ released);
+ if (err <= 0) {
+ if (psock) {
+ chunk = sk_msg_recvmsg(sk, psock, msg, len,
+ flags);
+ if (chunk > 0) {
+ decrypted += chunk;
+ len -= chunk;
+ continue;
+ }
+ }
+ goto recv_end;
+ }
+
+ memset(&darg.inargs, 0, sizeof(darg.inargs));
+
+ rxm = strp_msg(tls_strp_msg(ctx));
+ tlm = tls_msg(tls_strp_msg(ctx));
+
+ to_decrypt = rxm->full_len - prot->overhead_size;
+
+ if (zc_capable && to_decrypt <= len &&
+ tlm->control == TLS_RECORD_TYPE_DATA)
+ darg.zc = true;
+
+ /* Do not use async mode if record is non-data */
+ if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
+ darg.async = ctx->async_capable;
+ else
+ darg.async = false;
+
+ err = tls_rx_one_record(sk, msg, &darg);
+ if (err < 0) {
+ tls_err_abort(sk, -EBADMSG);
+ goto recv_end;
+ }
+
+ async |= darg.async;
+
+ /* If the type of records being processed is not yet known, set it
+ * to the type of the record just dequeued. If it is already known
+ * but does not match the record just dequeued, go to the end.
+ * We always have the record type here since for TLS 1.2 the type is
+ * known right after the record is dequeued from the stream parser,
+ * and for TLS 1.3 async is disabled.
+ */
+ err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
+ if (err <= 0) {
+ DEBUG_NET_WARN_ON_ONCE(darg.zc);
+ tls_rx_rec_done(ctx);
+put_on_rx_list_err:
+ __skb_queue_tail(&ctx->rx_list, darg.skb);
+ goto recv_end;
+ }
+
+ /* periodically flush backlog, and feed strparser */
+ released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
+ decrypted + copied,
+ &flushed_at);
+
+ /* TLS 1.3 may have updated the length by more than overhead */
+ rxm = strp_msg(darg.skb);
+ chunk = rxm->full_len;
+ tls_rx_rec_done(ctx);
+
+ if (!darg.zc) {
+ bool partially_consumed = chunk > len;
+ struct sk_buff *skb = darg.skb;
+
+ DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
+
+ if (async) {
+ /* TLS 1.2-only, to_decrypt must be text len */
+ chunk = min_t(int, to_decrypt, len);
+ async_copy_bytes += chunk;
+put_on_rx_list:
+ decrypted += chunk;
+ len -= chunk;
+ __skb_queue_tail(&ctx->rx_list, skb);
+ continue;
+ }
+
+ if (bpf_strp_enabled) {
+ released = true;
+ err = sk_psock_tls_strp_read(psock, skb);
+ if (err != __SK_PASS) {
+ rxm->offset = rxm->offset + rxm->full_len;
+ rxm->full_len = 0;
+ if (err == __SK_DROP)
+ consume_skb(skb);
+ continue;
+ }
+ }
+
+ if (partially_consumed)
+ chunk = len;
+
+ err = skb_copy_datagram_msg(skb, rxm->offset,
+ msg, chunk);
+ if (err < 0)
+ goto put_on_rx_list_err;
+
+ if (is_peek)
+ goto put_on_rx_list;
+
+ if (partially_consumed) {
+ rxm->offset += chunk;
+ rxm->full_len -= chunk;
+ goto put_on_rx_list;
+ }
+
+ consume_skb(skb);
+ }
+
+ decrypted += chunk;
+ len -= chunk;
+
+ /* Return full control message to userspace before trying
+ * to parse another message type
+ */
+ msg->msg_flags |= MSG_EOR;
+ if (control != TLS_RECORD_TYPE_DATA)
+ break;
+ }
+
+recv_end:
+ if (async) {
+ int ret, pending;
+
+ /* Wait for all previously submitted records to be decrypted */
+ spin_lock_bh(&ctx->decrypt_compl_lock);
+ reinit_completion(&ctx->async_wait.completion);
+ pending = atomic_read(&ctx->decrypt_pending);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
+ ret = 0;
+ if (pending)
+ ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ __skb_queue_purge(&ctx->async_hold);
+
+ if (ret) {
+ if (err >= 0 || err == -EINPROGRESS)
+ err = ret;
+ decrypted = 0;
+ goto end;
+ }
+
+ /* Drain records from the rx_list & copy if required */
+ if (is_peek || is_kvec)
+ err = process_rx_list(ctx, msg, &control, copied,
+ decrypted, is_peek);
+ else
+ err = process_rx_list(ctx, msg, &control, 0,
+ async_copy_bytes, is_peek);
+ decrypted += max(err, 0);
+ }
+
+ copied += decrypted;
+
+end:
+ tls_rx_reader_unlock(sk, ctx);
+ if (psock)
+ sk_psock_put(sk, psock);
+ return copied ? : err;
+}
+
+ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct strp_msg *rxm = NULL;
+ struct sock *sk = sock->sk;
+ struct tls_msg *tlm;
+ struct sk_buff *skb;
+ ssize_t copied = 0;
+ int chunk;
+ int err;
+
+ err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
+ if (err < 0)
+ return err;
+
+ if (!skb_queue_empty(&ctx->rx_list)) {
+ skb = __skb_dequeue(&ctx->rx_list);
+ } else {
+ struct tls_decrypt_arg darg;
+
+ err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
+ true);
+ if (err <= 0)
+ goto splice_read_end;
+
+ memset(&darg.inargs, 0, sizeof(darg.inargs));
+
+ err = tls_rx_one_record(sk, NULL, &darg);
+ if (err < 0) {
+ tls_err_abort(sk, -EBADMSG);
+ goto splice_read_end;
+ }
+
+ tls_rx_rec_done(ctx);
+ skb = darg.skb;
+ }
+
+ rxm = strp_msg(skb);
+ tlm = tls_msg(skb);
+
+ /* splice does not support reading control messages */
+ if (tlm->control != TLS_RECORD_TYPE_DATA) {
+ err = -EINVAL;
+ goto splice_requeue;
+ }
+
+ chunk = min_t(unsigned int, rxm->full_len, len);
+ copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
+ if (copied < 0)
+ goto splice_requeue;
+
+ if (chunk < rxm->full_len) {
+ rxm->offset += len;
+ rxm->full_len -= len;
+ goto splice_requeue;
+ }
+
+ consume_skb(skb);
+
+splice_read_end:
+ tls_rx_reader_unlock(sk, ctx);
+ return copied ? : err;
+
+splice_requeue:
+ __skb_queue_head(&ctx->rx_list, skb);
+ goto splice_read_end;
+}
+
+bool tls_sw_sock_is_readable(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ bool ingress_empty = true;
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock)
+ ingress_empty = list_empty(&psock->ingress_msg);
+ rcu_read_unlock();
+
+ return !ingress_empty || tls_strp_msg_ready(ctx) ||
+ !skb_queue_empty(&ctx->rx_list);
+}
+
+int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
+ size_t cipher_overhead;
+ size_t data_len = 0;
+ int ret;
+
+ /* Verify that we have a full TLS header, or wait for more data */
+ if (strp->stm.offset + prot->prepend_size > skb->len)
+ return 0;
+
+ /* Sanity-check size of on-stack buffer. */
+ if (WARN_ON(prot->prepend_size > sizeof(header))) {
+ ret = -EINVAL;
+ goto read_failure;
+ }
+
+ /* Linearize header to local buffer */
+ ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
+ if (ret < 0)
+ goto read_failure;
+
+ strp->mark = header[0];
+
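+	/* Record length occupies bytes 3-4 of the 5-byte TLS record header,
+	 * in network byte order.
+	 */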
+ data_len = ((header[4] & 0xFF) | (header[3] << 8));
+
+ cipher_overhead = prot->tag_size;
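+	/* TLS 1.2 GCM/CCM records carry an explicit per-record IV right after
+	 * the header; TLS 1.3 and ChaCha20-Poly1305 derive the nonce instead.
+	 */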
+ if (prot->version != TLS_1_3_VERSION &&
+ prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
+ cipher_overhead += prot->iv_size;
+
+ if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
+ prot->tail_size) {
+ ret = -EMSGSIZE;
+ goto read_failure;
+ }
+ if (data_len < cipher_overhead) {
+ ret = -EBADMSG;
+ goto read_failure;
+ }
+
+ /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
+ if (header[1] != TLS_1_2_VERSION_MINOR ||
+ header[2] != TLS_1_2_VERSION_MAJOR) {
+ ret = -EINVAL;
+ goto read_failure;
+ }
+
+ tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
+ TCP_SKB_CB(skb)->seq + strp->stm.offset);
+ return data_len + TLS_HEADER_SIZE;
+
+read_failure:
+ tls_err_abort(strp->sk, ret);
+
+ return ret;
+}
+
+void tls_rx_msg_ready(struct tls_strparser *strp)
+{
+ struct tls_sw_context_rx *ctx;
+
+ ctx = container_of(strp, struct tls_sw_context_rx, strp);
+ ctx->saved_data_ready(strp->sk);
+}
+
+static void tls_data_ready(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct sk_psock *psock;
+ gfp_t alloc_save;
+
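+	/* The data_ready callback may run in softirq context; make sure any
+	 * allocations done by the strparser below do not sleep.
+	 */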
+ alloc_save = sk->sk_allocation;
+ sk->sk_allocation = GFP_ATOMIC;
+ tls_strp_data_ready(&ctx->strp);
+ sk->sk_allocation = alloc_save;
+
+ psock = sk_psock_get(sk);
+ if (psock) {
+ if (!list_empty(&psock->ingress_msg))
+ ctx->saved_data_ready(sk);
+ sk_psock_put(sk, psock);
+ }
+}
+
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+ set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
+ set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
+ cancel_delayed_work_sync(&ctx->tx_work.work);
+}
+
+void tls_sw_release_resources_tx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec, *tmp;
+ int pending;
+
+ /* Wait for any pending async encryptions to complete */
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ ctx->async_notify = true;
+ pending = atomic_read(&ctx->encrypt_pending);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+
+ if (pending)
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+
+ tls_tx_records(sk, -1);
+
+ /* Free up un-sent records in tx_list. First, free
+ * the partially sent record if any at head of tx_list.
+ */
+ if (tls_ctx->partially_sent_record) {
+ tls_free_partial_record(sk, tls_ctx);
+ rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
+
+ list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_encrypted);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
+
+ crypto_free_aead(ctx->aead_send);
+ tls_free_open_rec(sk);
+}
+
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+ kfree(ctx);
+}
+
+void tls_sw_release_resources_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+ kfree(tls_ctx->rx.rec_seq);
+ kfree(tls_ctx->rx.iv);
+
+ if (ctx->aead_recv) {
+ __skb_queue_purge(&ctx->rx_list);
+ crypto_free_aead(ctx->aead_recv);
+ tls_strp_stop(&ctx->strp);
+ /* If tls_sw_strparser_arm() was not called (cleanup paths)
+ * we still want to tls_strp_stop(), but sk->sk_data_ready was
+ * never swapped.
+ */
+ if (ctx->saved_data_ready) {
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_data_ready = ctx->saved_data_ready;
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ }
+}
+
+void tls_sw_strparser_done(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+ tls_strp_done(&ctx->strp);
+}
+
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+ kfree(ctx);
+}
+
+void tls_sw_free_resources_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_sw_release_resources_rx(sk);
+ tls_sw_free_ctx_rx(tls_ctx);
+}
+
+/* The work handler to transmit the encrypted records in tx_list */
+static void tx_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct tx_work *tx_work = container_of(delayed_work,
+ struct tx_work, work);
+ struct sock *sk = tx_work->sk;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx;
+
+ if (unlikely(!tls_ctx))
+ return;
+
+ ctx = tls_sw_ctx_tx(tls_ctx);
+ if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
+ return;
+
+ if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ return;
+
+ if (mutex_trylock(&tls_ctx->tx_lock)) {
+ lock_sock(sk);
+ tls_tx_records(sk, -1);
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+ } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ /* Someone is holding the tx_lock, they will likely run Tx
+ * and cancel the work on their way out of the lock section.
+ * Schedule a long delay just in case.
+ */
+ schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
+ }
+}
+
+static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
+{
+ struct tls_rec *rec;
+
+ rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
+ if (!rec)
+ return false;
+
+ return READ_ONCE(rec->tx_ready);
+}
+
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
+{
+ struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+ /* Schedule the transmission if tx list is ready */
+ if (tls_is_tx_ready(tx_ctx) &&
+ !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
+ schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+}
+
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ rx_ctx->saved_data_ready = sk->sk_data_ready;
+ sk->sk_data_ready = tls_data_ready;
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
+
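+	/* TLS 1.3 may pad records, so the plaintext length is not known until
+	 * decryption completes; decrypting straight into user buffers is only
+	 * safe when the peer promises not to pad, or for TLS 1.2.
+	 */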
+ rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
+ tls_ctx->prot_info.version != TLS_1_3_VERSION;
+}
+
+int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_crypto_info *crypto_info;
+ struct tls_sw_context_tx *sw_ctx_tx = NULL;
+ struct tls_sw_context_rx *sw_ctx_rx = NULL;
+ struct cipher_context *cctx;
+ struct crypto_aead **aead;
+ u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
+ struct crypto_tfm *tfm;
+ char *iv, *rec_seq, *key, *salt, *cipher_name;
+ size_t keysize;
+ int rc = 0;
+
+ if (!ctx) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (tx) {
+ if (!ctx->priv_ctx_tx) {
+ sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+ if (!sw_ctx_tx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ctx->priv_ctx_tx = sw_ctx_tx;
+ } else {
+ sw_ctx_tx =
+ (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
+ }
+ } else {
+ if (!ctx->priv_ctx_rx) {
+ sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+ if (!sw_ctx_rx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ctx->priv_ctx_rx = sw_ctx_rx;
+ } else {
+ sw_ctx_rx =
+ (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
+ }
+ }
+
+ if (tx) {
+ crypto_init_wait(&sw_ctx_tx->async_wait);
+ spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+ crypto_info = &ctx->crypto_send.info;
+ cctx = &ctx->tx;
+ aead = &sw_ctx_tx->aead_send;
+ INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+ INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+ sw_ctx_tx->tx_work.sk = sk;
+ } else {
+ crypto_init_wait(&sw_ctx_rx->async_wait);
+ spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+ init_waitqueue_head(&sw_ctx_rx->wq);
+ crypto_info = &ctx->crypto_recv.info;
+ cctx = &ctx->rx;
+ skb_queue_head_init(&sw_ctx_rx->rx_list);
+ skb_queue_head_init(&sw_ctx_rx->async_hold);
+ aead = &sw_ctx_rx->aead_recv;
+ }
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+
+ gcm_128_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ iv = gcm_128_info->iv;
+ rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
+ rec_seq = gcm_128_info->rec_seq;
+ keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ key = gcm_128_info->key;
+ salt = gcm_128_info->salt;
+ salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ cipher_name = "gcm(aes)";
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
+
+ gcm_256_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
+ iv = gcm_256_info->iv;
+ rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
+ rec_seq = gcm_256_info->rec_seq;
+ keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
+ key = gcm_256_info->key;
+ salt = gcm_256_info->salt;
+ salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ cipher_name = "gcm(aes)";
+ break;
+ }
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
+
+ ccm_128_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
+ tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
+ iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
+ iv = ccm_128_info->iv;
+ rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
+ rec_seq = ccm_128_info->rec_seq;
+ keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
+ key = ccm_128_info->key;
+ salt = ccm_128_info->salt;
+ salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
+ cipher_name = "ccm(aes)";
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
+
+ chacha20_poly1305_info = (void *)crypto_info;
+ nonce_size = 0;
+ tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
+ iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
+ iv = chacha20_poly1305_info->iv;
+ rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
+ rec_seq = chacha20_poly1305_info->rec_seq;
+ keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
+ key = chacha20_poly1305_info->key;
+ salt = chacha20_poly1305_info->salt;
+ salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
+ cipher_name = "rfc7539(chacha20,poly1305)";
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+ sm4_gcm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ iv = sm4_gcm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+ rec_seq = sm4_gcm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+ key = sm4_gcm_info->key;
+ salt = sm4_gcm_info->salt;
+ salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+ cipher_name = "gcm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+ sm4_ccm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ iv = sm4_ccm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+ rec_seq = sm4_ccm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+ key = sm4_ccm_info->key;
+ salt = sm4_ccm_info->salt;
+ salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+ cipher_name = "ccm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_ARIA_GCM_128: {
+ struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;
+
+ aria_gcm_128_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
+ tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
+ iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
+ iv = aria_gcm_128_info->iv;
+ rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
+ rec_seq = aria_gcm_128_info->rec_seq;
+ keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
+ key = aria_gcm_128_info->key;
+ salt = aria_gcm_128_info->salt;
+ salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
+ cipher_name = "gcm(aria)";
+ break;
+ }
+ case TLS_CIPHER_ARIA_GCM_256: {
+ struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;
+
+ gcm_256_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
+ tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
+ iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
+ iv = gcm_256_info->iv;
+ rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
+ rec_seq = gcm_256_info->rec_seq;
+ keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
+ key = gcm_256_info->key;
+ salt = gcm_256_info->salt;
+ salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
+ cipher_name = "gcm(aria)";
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ goto free_priv;
+ }
+
+ if (crypto_info->version == TLS_1_3_VERSION) {
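+		/* TLS 1.3 records carry no explicit nonce; the per-record
+		 * nonce is derived from the implicit IV and sequence number.
+		 */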
+ nonce_size = 0;
+ prot->aad_size = TLS_HEADER_SIZE;
+ prot->tail_size = 1;
+ } else {
+ prot->aad_size = TLS_AAD_SPACE_SIZE;
+ prot->tail_size = 0;
+ }
+
+ /* Sanity-check the sizes for stack allocations. */
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
+ prot->aad_size > TLS_MAX_AAD_SIZE) {
+ rc = -EINVAL;
+ goto free_priv;
+ }
+
+ prot->version = crypto_info->version;
+ prot->cipher_type = crypto_info->cipher_type;
+ prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
+ prot->tag_size = tag_size;
+ prot->overhead_size = prot->prepend_size +
+ prot->tag_size + prot->tail_size;
+ prot->iv_size = iv_size;
+ prot->salt_size = salt_size;
+ cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
+ if (!cctx->iv) {
+ rc = -ENOMEM;
+ goto free_priv;
+ }
+ /* Note: 128 & 256 bit salt are the same size */
+ prot->rec_seq_size = rec_seq_size;
+ memcpy(cctx->iv, salt, salt_size);
+ memcpy(cctx->iv + salt_size, iv, iv_size);
+ cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
+ if (!cctx->rec_seq) {
+ rc = -ENOMEM;
+ goto free_iv;
+ }
+
+ if (!*aead) {
+ *aead = crypto_alloc_aead(cipher_name, 0, 0);
+ if (IS_ERR(*aead)) {
+ rc = PTR_ERR(*aead);
+ *aead = NULL;
+ goto free_rec_seq;
+ }
+ }
+
+ ctx->push_pending_record = tls_sw_push_pending_record;
+
+ rc = crypto_aead_setkey(*aead, key, keysize);
+
+ if (rc)
+ goto free_aead;
+
+ rc = crypto_aead_setauthsize(*aead, prot->tag_size);
+ if (rc)
+ goto free_aead;
+
+ if (sw_ctx_rx) {
+ tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
+
+ tls_update_rx_zc_capable(ctx);
+ sw_ctx_rx->async_capable =
+ crypto_info->version != TLS_1_3_VERSION &&
+ !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
+
+ rc = tls_strp_init(&sw_ctx_rx->strp, sk);
+ if (rc)
+ goto free_aead;
+ }
+
+ goto out;
+
+free_aead:
+ crypto_free_aead(*aead);
+ *aead = NULL;
+free_rec_seq:
+ kfree(cctx->rec_seq);
+ cctx->rec_seq = NULL;
+free_iv:
+ kfree(cctx->iv);
+ cctx->iv = NULL;
+free_priv:
+ if (tx) {
+ kfree(ctx->priv_ctx_tx);
+ ctx->priv_ctx_tx = NULL;
+ } else {
+ kfree(ctx->priv_ctx_rx);
+ ctx->priv_ctx_rx = NULL;
+ }
+out:
+ return rc;
+}
diff --git a/net/tls/tls_toe.c b/net/tls/tls_toe.c
new file mode 100644
index 000000000..825669e1a
--- /dev/null
+++ b/net/tls/tls_toe.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <net/inet_connection_sock.h>
+#include <net/tls.h>
+#include <net/tls_toe.h>
+
+#include "tls.h"
+
+static LIST_HEAD(device_list);
+static DEFINE_SPINLOCK(device_spinlock);
+
+static void tls_toe_sk_destruct(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+ rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+ tls_ctx_free(sk, ctx);
+}
+
+int tls_toe_bypass(struct sock *sk)
+{
+ struct tls_toe_device *dev;
+ struct tls_context *ctx;
+ int rc = 0;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->feature && dev->feature(dev)) {
+ ctx = tls_ctx_create(sk);
+ if (!ctx)
+ goto out;
+
+ ctx->sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = tls_toe_sk_destruct;
+ ctx->rx_conf = TLS_HW_RECORD;
+ ctx->tx_conf = TLS_HW_RECORD;
+ update_sk_prot(sk, ctx);
+ rc = 1;
+ break;
+ }
+ }
+out:
+ spin_unlock_bh(&device_spinlock);
+ return rc;
+}
+
+void tls_toe_unhash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->unhash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ dev->unhash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+ ctx->sk_proto->unhash(sk);
+}
+
+int tls_toe_hash(struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_toe_device *dev;
+ int err;
+
+ err = ctx->sk_proto->hash(sk);
+ spin_lock_bh(&device_spinlock);
+ list_for_each_entry(dev, &device_list, dev_list) {
+ if (dev->hash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
+ err |= dev->hash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
+ }
+ spin_unlock_bh(&device_spinlock);
+
+ if (err)
+ tls_toe_unhash(sk);
+ return err;
+}
+
+void tls_toe_register_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_add_tail(&device->dev_list, &device_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_register_device);
+
+void tls_toe_unregister_device(struct tls_toe_device *device)
+{
+ spin_lock_bh(&device_spinlock);
+ list_del(&device->dev_list);
+ spin_unlock_bh(&device_spinlock);
+}
+EXPORT_SYMBOL(tls_toe_unregister_device);
diff --git a/net/tls/trace.c b/net/tls/trace.c
new file mode 100644
index 000000000..e374913cf
--- /dev/null
+++ b/net/tls/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/tls/trace.h b/net/tls/trace.h
new file mode 100644
index 000000000..9ba5f600e
--- /dev/null
+++ b/net/tls/trace.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tls
+
+#if !defined(_TLS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TLS_TRACE_H_
+
+#include <asm/unaligned.h>
+#include <linux/tracepoint.h>
+
+struct sock;
+
+TRACE_EVENT(tls_device_offload_set,
+
+ TP_PROTO(struct sock *sk, int dir, u32 tcp_seq, u8 *rec_no, int ret),
+
+ TP_ARGS(sk, dir, tcp_seq, rec_no, ret),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( int, dir )
+ __field( u32, tcp_seq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->dir = dir;
+ __entry->tcp_seq = tcp_seq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ "sk=%p direction=%d tcp_seq=%u rec_no=%llu ret=%d",
+ __entry->sk, __entry->dir, __entry->tcp_seq, __entry->rec_no,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(tls_device_decrypted,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, u32 rec_len,
+ bool encrypted, bool decrypted),
+
+ TP_ARGS(sk, tcp_seq, rec_no, rec_len, encrypted, decrypted),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( u32, rec_len )
+ __field( bool, encrypted )
+ __field( bool, decrypted )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->rec_len = rec_len;
+ __entry->encrypted = encrypted;
+ __entry->decrypted = decrypted;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu len=%u encrypted=%d decrypted=%d",
+ __entry->sk, __entry->tcp_seq,
+ __entry->rec_no, __entry->rec_len,
+ __entry->encrypted, __entry->decrypted
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, int sync_type),
+
+ TP_ARGS(sk, tcp_seq, rec_no, sync_type),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ __field( int, sync_type )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ __entry->sync_type = sync_type;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu sync_type=%d",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no,
+ __entry->sync_type
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_schedule,
+
+ TP_PROTO(struct sock *sk),
+
+ TP_ARGS(sk),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ ),
+
+ TP_printk(
+ "sk=%p", __entry->sk
+ )
+);
+
+TRACE_EVENT(tls_device_rx_resync_nh_delay,
+
+ TP_PROTO(struct sock *sk, u32 sock_data, u32 rec_len),
+
+ TP_ARGS(sk, sock_data, rec_len),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, sock_data )
+ __field( u32, rec_len )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->sock_data = sock_data;
+ __entry->rec_len = rec_len;
+ ),
+
+ TP_printk(
+ "sk=%p sock_data=%u rec_len=%u",
+ __entry->sk, __entry->sock_data, __entry->rec_len
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_req,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u32 exp_tcp_seq),
+
+ TP_ARGS(sk, tcp_seq, exp_tcp_seq),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u32, tcp_seq )
+ __field( u32, exp_tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->tcp_seq = tcp_seq;
+ __entry->exp_tcp_seq = exp_tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u exp_tcp_seq=%u",
+ __entry->sk, __entry->tcp_seq, __entry->exp_tcp_seq
+ )
+);
+
+TRACE_EVENT(tls_device_tx_resync_send,
+
+ TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no),
+
+ TP_ARGS(sk, tcp_seq, rec_no),
+
+ TP_STRUCT__entry(
+ __field( struct sock *, sk )
+ __field( u64, rec_no )
+ __field( u32, tcp_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->sk = sk;
+ __entry->rec_no = get_unaligned_be64(rec_no);
+ __entry->tcp_seq = tcp_seq;
+ ),
+
+ TP_printk(
+ "sk=%p tcp_seq=%u rec_no=%llu",
+ __entry->sk, __entry->tcp_seq, __entry->rec_no
+ )
+);
+
+#endif /* _TLS_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>