Diffstat (limited to 'net/tls')
-rw-r--r--   net/tls/tls_device.c            184
-rw-r--r--   net/tls/tls_device_fallback.c    16
-rw-r--r--   net/tls/tls_main.c                4
-rw-r--r--   net/tls/tls_sw.c                 29
4 files changed, 190 insertions(+), 43 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 1f9cf57d9754..7c0b2b778703 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -61,7 +61,7 @@ static void tls_device_free_ctx(struct tls_context *ctx)
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
- kfree(ctx);
+ tls_ctx_free(ctx);
}
static void tls_device_gc_task(struct work_struct *work)
@@ -209,6 +209,33 @@ void tls_device_free_resources_tx(struct sock *sk)
tls_free_partial_record(sk, tls_ctx);
}
+static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
+ u32 seq)
+{
+ struct net_device *netdev;
+ struct sk_buff *skb;
+ int err = 0;
+ u8 *rcd_sn;
+
+ skb = tcp_write_queue_tail(sk);
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+
+ rcd_sn = tls_ctx->tx.rec_seq;
+
+ down_read(&device_offload_lock);
+ netdev = tls_ctx->netdev;
+ if (netdev)
+ err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
+ rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_TX);
+ up_read(&device_offload_lock);
+ if (err)
+ return;
+
+ clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+}
+
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
int size)
@@ -252,7 +279,7 @@ static int tls_push_record(struct sock *sk,
skb_frag_address(frag),
record->len - prot->prepend_size,
record_type,
- ctx->crypto_send.info.version);
+ prot->version);
/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
@@ -264,7 +291,11 @@ static int tls_push_record(struct sock *sk,
list_add_tail(&record->list, &offload_ctx->records_list);
spin_unlock_irq(&offload_ctx->lock);
offload_ctx->open_record = NULL;
- tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
+
+ if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
+ tls_device_resync_tx(sk, ctx, tp->write_seq);
+
+ tls_advance_record_sn(sk, prot, &ctx->tx);
for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
@@ -551,7 +582,7 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
- struct sock *sk, u32 seq, u64 rcd_sn)
+ struct sock *sk, u32 seq, u8 *rcd_sn)
{
struct net_device *netdev;
@@ -559,14 +590,17 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
return;
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
- netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_RX);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
-void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ struct tls_prot_info *prot;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -574,15 +608,83 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
if (tls_ctx->rx_conf != TLS_HW)
return;
+ prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
- resync_req = atomic64_read(&rx_ctx->resync_req);
- req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
- is_req_pending = resync_req;
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
- if (unlikely(is_req_pending) && req_seq == seq &&
- atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+ switch (rx_ctx->resync_type) {
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
+ resync_req = atomic64_read(&rx_ctx->resync_req);
+ req_seq = resync_req >> 32;
seq += TLS_HEADER_SIZE - 1;
- tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+ is_req_pending = resync_req;
+
+ if (likely(!is_req_pending) || req_seq != seq ||
+ !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+ return;
+ break;
+ case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
+ if (likely(!rx_ctx->resync_nh_do_now))
+ return;
+
+ /* head of next rec is already in, note that the sock_inq will
+ * include the currently parsed message when called from parser
+ */
+ if (tcp_inq(sk) > rcd_len)
+ return;
+
+ rx_ctx->resync_nh_do_now = 0;
+ seq += rcd_len;
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+ break;
+ }
+
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+}
+
+static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
+ struct tls_offload_context_rx *ctx,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct strp_msg *rxm;
+
+ /* device will request resyncs by itself based on stream scan */
+ if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
+ return;
+ /* already scheduled */
+ if (ctx->resync_nh_do_now)
+ return;
+ /* seen decrypted fragments since last fully-failed record */
+ if (ctx->resync_nh_reset) {
+ ctx->resync_nh_reset = 0;
+ ctx->resync_nh.decrypted_failed = 1;
+ ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
+ return;
+ }
+
+ if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
+ return;
+
+ /* doing resync, bump the next target in case it fails */
+ if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
+ ctx->resync_nh.decrypted_tgt *= 2;
+ else
+ ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
+
+ rxm = strp_msg(skb);
+
+ /* head of next rec is already in, parser will sync for us */
+ if (tcp_inq(sk) > rxm->full_len) {
+ ctx->resync_nh_do_now = 1;
+ } else {
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+
+ tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
+ rcd_sn);
}
}
@@ -610,8 +712,10 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
sg_set_buf(&sg[0], buf,
rxm->full_len + TLS_HEADER_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE);
- skb_copy_bits(skb, offset, buf,
- TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ err = skb_copy_bits(skb, offset, buf,
+ TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ if (err)
+ goto free_buf;
/* We are interested only in the decrypted data not the auth */
err = decrypt_skb(sk, skb, sg);
@@ -625,8 +729,11 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len);
- if (skb->decrypted)
- skb_store_bits(skb, offset, buf, copy);
+ if (skb->decrypted) {
+ err = skb_store_bits(skb, offset, buf, copy);
+ if (err)
+ goto free_buf;
+ }
offset += copy;
buf += copy;
@@ -649,8 +756,11 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
copy = min_t(int, skb_iter->len - frag_pos,
data_len + rxm->offset - offset);
- if (skb_iter->decrypted)
- skb_store_bits(skb_iter, frag_pos, buf, copy);
+ if (skb_iter->decrypted) {
+ err = skb_store_bits(skb_iter, frag_pos, buf, copy);
+ if (err)
+ goto free_buf;
+ }
offset += copy;
buf += copy;
@@ -671,10 +781,6 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;
- /* Skip if it is already decrypted */
- if (ctx->sw.decrypted)
- return 0;
-
/* Check if all the data is decrypted already */
skb_walk_frags(skb, skb_iter) {
is_decrypted &= skb_iter->decrypted;
@@ -683,12 +789,21 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
ctx->sw.decrypted |= is_decrypted;
- /* Return immedeatly if the record is either entirely plaintext or
+ /* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
*/
- return (is_encrypted || is_decrypted) ? 0 :
- tls_device_reencrypt(sk, skb);
+ if (is_decrypted) {
+ ctx->resync_nh_reset = 1;
+ return 0;
+ }
+ if (is_encrypted) {
+ tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
+ return 0;
+ }
+
+ ctx->resync_nh_reset = 1;
+ return tls_device_reencrypt(sk, skb);
}
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
@@ -742,6 +857,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
}
crypto_info = &ctx->crypto_send.info;
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EOPNOTSUPP;
+ goto free_offload_ctx;
+ }
+
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -757,6 +877,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto free_offload_ctx;
}
+ /* Sanity-check the rec_seq_size for stack allocations */
+ if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+ rc = -EINVAL;
+ goto free_offload_ctx;
+ }
+
+ prot->version = crypto_info->version;
+ prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
prot->tag_size = tag_size;
prot->overhead_size = prot->prepend_size + prot->tag_size;
@@ -876,6 +1004,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
struct net_device *netdev;
int rc = 0;
+ if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
+ return -EOPNOTSUPP;
+
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
@@ -908,6 +1039,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
rc = -ENOMEM;
goto release_netdev;
}
+ context->resync_nh_reset = 1;
ctx->priv_ctx_rx = context;
rc = tls_set_sw_offload(sk, ctx, 0);
@@ -1015,7 +1147,7 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
case NETDEV_REGISTER:
case NETDEV_FEAT_CHANGE:
if ((dev->features & NETIF_F_HW_TLS_RX) &&
- !dev->tlsdev_ops->tls_dev_resync_rx)
+ !dev->tlsdev_ops->tls_dev_resync)
return NOTIFY_BAD;
if (dev->tlsdev_ops &&
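The tls_device.c hunks above fold the old per-direction .tls_dev_resync_rx driver hook into a single .tls_dev_resync callback that also serves the new TX resync path, and the NETDEV_REGISTER/NETDEV_FEAT_CHANGE check now requires it on any device advertising NETIF_F_HW_TLS_RX. A minimal sketch of the driver-side shape this implies follows; the foo_* names are hypothetical and only illustrate the callback signature and dispatch, not any in-tree driver.

/* Hypothetical driver sketch: one resync entry point, dispatching on the
 * offload direction the TLS core passes in. foo_resync_rx()/foo_resync_tx()
 * stand in for whatever reprograms the NIC's record state.
 */
static int foo_tls_dev_resync(struct net_device *netdev, struct sock *sk,
                              u32 seq, u8 *rcd_sn,
                              enum tls_offload_ctx_dir direction)
{
        struct foo_priv *priv = netdev_priv(netdev);

        if (direction == TLS_OFFLOAD_CTX_DIR_RX)
                return foo_resync_rx(priv, sk, seq, rcd_sn);

        /* TX: re-seed the HW crypto state at TCP sequence 'seq' */
        return foo_resync_tx(priv, sk, seq, rcd_sn);
}

static const struct tlsdev_ops foo_tlsdev_ops = {
        .tls_dev_add    = foo_tls_dev_add,
        .tls_dev_del    = foo_tls_dev_del,
        .tls_dev_resync = foo_tls_dev_resync,
};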
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index c3a5fe624b4e..9070d68a92a4 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -209,6 +209,10 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
update_chksum(nskb, headln);
+ /* sock_efree means the skb must have gone through skb_orphan_partial() */
+ if (nskb->destructor == sock_efree)
+ return;
+
delta = nskb->truesize - skb->truesize;
if (likely(delta < 0))
WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
@@ -240,7 +244,6 @@ static int fill_sg_in(struct scatterlist *sg_in,
record = tls_get_record(ctx, tcp_seq, rcd_sn);
if (!record) {
spin_unlock_irqrestore(&ctx->lock, flags);
- WARN(1, "Record not found for seq %u\n", tcp_seq);
return -EINVAL;
}
@@ -409,7 +412,10 @@ put_sg:
put_page(sg_page(&sg_in[--resync_sgs]));
kfree(sg_in);
free_orig:
- kfree_skb(skb);
+ if (nskb)
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
return nskb;
}
@@ -424,6 +430,12 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
+struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
+{
+ return tls_sw_fallback(skb->sk, skb);
+}
+EXPORT_SYMBOL_GPL(tls_encrypt_skb);
+
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info)
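tls_device_fallback.c now exports tls_encrypt_skb(), a thin wrapper around the software fallback, so a driver can encrypt an offloaded record on the CPU when its hardware cannot handle a particular skb. A hedged sketch of how a driver transmit path might use it follows; foo_can_offload_record() and foo_xmit() are hypothetical placeholders.

/* Sketch only: fall back to software TLS encryption for skbs the device
 * cannot offload. Per the fallback code above, tls_encrypt_skb() returns a
 * new skb, or NULL on failure with the original already freed.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk) &&
            !foo_can_offload_record(dev, skb)) {
                skb = tls_encrypt_skb(skb);
                if (!skb)
                        return NETDEV_TX_OK;
        }

        return foo_xmit(dev, skb);
}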
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e2b69e805d46..4674e57e66b0 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -251,7 +251,7 @@ static void tls_write_space(struct sock *sk)
ctx->sk_write_space(sk);
}
-static void tls_ctx_free(struct tls_context *ctx)
+void tls_ctx_free(struct tls_context *ctx)
{
if (!ctx)
return;
@@ -643,7 +643,7 @@ static void tls_hw_sk_destruct(struct sock *sk)
ctx->sk_destruct(sk);
/* Free ctx */
- kfree(ctx);
+ tls_ctx_free(ctx);
icsk->icsk_ulp_data = NULL;
}
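Both call sites in tls_main.c and tls_device.c now free the context through tls_ctx_free() instead of a bare kfree(). The function body is not shown in this hunk; based on the surrounding code it is assumed to look roughly like the sketch below, the point being that cached crypto state is wiped before the memory is released, which the plain kfree() did not do.

/* Assumed shape of tls_ctx_free() (body not part of this diff): zero the
 * cached TX/RX crypto info so key material does not linger in freed memory,
 * then release the context.
 */
void tls_ctx_free(struct tls_context *ctx)
{
        if (!ctx)
                return;

        memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
        memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
        kfree(ctx);
}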
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 455a782c7658..53b4ad94e74a 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -534,7 +534,7 @@ static int tls_do_encryption(struct sock *sk,
/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
- tls_advance_record_sn(sk, &tls_ctx->tx, prot->version);
+ tls_advance_record_sn(sk, prot, &tls_ctx->tx);
return rc;
}
@@ -1485,15 +1485,16 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
- int version = prot->version;
struct strp_msg *rxm = strp_msg(skb);
int pad, err = 0;
if (!ctx->decrypted) {
#ifdef CONFIG_TLS_DEVICE
- err = tls_device_decrypted(sk, skb);
- if (err < 0)
- return err;
+ if (tls_ctx->rx_conf == TLS_HW) {
+ err = tls_device_decrypted(sk, skb);
+ if (err < 0)
+ return err;
+ }
#endif
/* Still not decrypted after tls_device */
if (!ctx->decrypted) {
@@ -1501,8 +1502,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
async);
if (err < 0) {
if (err == -EINPROGRESS)
- tls_advance_record_sn(sk, &tls_ctx->rx,
- version);
+ tls_advance_record_sn(sk, prot,
+ &tls_ctx->rx);
return err;
}
@@ -1517,7 +1518,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
rxm->full_len -= pad;
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
- tls_advance_record_sn(sk, &tls_ctx->rx, version);
+ tls_advance_record_sn(sk, prot, &tls_ctx->rx);
ctx->decrypted = true;
ctx->saved_data_ready(sk);
} else {
@@ -1958,7 +1959,8 @@ bool tls_sw_stream_read(const struct sock *sk)
ingress_empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
- return !ingress_empty || ctx->recv_pkt;
+ return !ingress_empty || ctx->recv_pkt ||
+ !skb_queue_empty(&ctx->rx_list);
}
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -2013,8 +2015,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
#ifdef CONFIG_TLS_DEVICE
- handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
- *(u64*)tls_ctx->rx.rec_seq);
+ tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
+ TCP_SKB_CB(skb)->seq + rxm->offset);
#endif
return data_len + TLS_HEADER_SIZE;
@@ -2281,8 +2283,9 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
- /* Sanity-check the IV size for stack allocations. */
- if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
+ /* Sanity-check the sizes for stack allocations. */
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
rc = -EINVAL;
goto free_priv;
}
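The core-driven RX resync added in tls_device.c, and wired up from tls_read_size() just above, backs off between attempts: the target count of undecrypted records doubles after each resync request until it reaches a cap, then grows by the cap. The standalone program below mirrors that arithmetic and prints the resulting schedule; the interval constants are illustrative values, not necessarily the kernel's TLS_DEVICE_RESYNC_NH_* definitions.

#include <stdio.h>

/* Illustrative interval values; the kernel's START/MAX constants may differ. */
#define START_IVAL      2
#define MAX_IVAL        128

int main(void)
{
        unsigned int decrypted_failed = 1;      /* as set on the reset path */
        unsigned int decrypted_tgt = START_IVAL;
        unsigned int rec;

        for (rec = 1; rec <= 2000; rec++) {
                /* one fully-encrypted (undecrypted) record seen */
                if (++decrypted_failed <= decrypted_tgt)
                        continue;

                if (decrypted_tgt < MAX_IVAL)
                        decrypted_tgt *= 2;
                else
                        decrypted_tgt += MAX_IVAL;

                printf("record %4u: request resync, next target %u\n",
                       rec, decrypted_tgt);
        }
        return 0;
}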