summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorVakul Garg <vakul.garg@nxp.com>2018-09-24 15:35:56 +0530
committerDavid S. Miller <davem@davemloft.net>2018-09-24 12:24:24 -0700
commit9932a29ab1be1427a2ccbdf852a0f131f2849685 (patch)
treeb721798d416991241369628a76f48a872d08bb0a /include
parent094fe7392d6e0c8fa516dca451d4e005a2238e28 (diff)
net/tls: Fixed race condition in async encryption
On processors with multi-engine crypto accelerators, it is possible that multiple records get encrypted in parallel and their encryption completion is notified to different cpus in multicore processor. This leads to the situation where tls_encrypt_done() starts executing in parallel on different cores. In current implementation, encrypted records are queued to tx_ready_list in tls_encrypt_done(). This requires addition to linked list 'tx_ready_list' to be protected. As tls_encrypt_done() could be executing in irq context, it is not possible to protect linked list addition operation using a lock. To fix the problem, we remove linked list addition operation from the irq context. We do tx_ready_list addition/removal operation from application context only and get rid of possible multiple access to the linked list. Before starting encryption on the record, we add it to the tail of tx_ready_list. To prevent tls_tx_records() from transmitting it, we mark the record with a new flag 'tx_ready' in 'struct tls_rec'. When record encryption gets completed, tls_encrypt_done() has to only update the 'tx_ready' flag to true & linked list add operation is not required. The changed logic brings some other side benefits. Since the records are always submitted in tls sequence number order for encryption, the tx_ready_list always remains sorted and addition of new records to it does not have to traverse the linked list. Lastly, we renamed tx_ready_list in 'struct tls_sw_context_tx' to 'tx_list'. This is because now, some of the records at the tail are not ready to transmit. Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption") Signed-off-by: Vakul Garg <vakul.garg@nxp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/net/tls.h16
1 files changed, 5 insertions, 11 deletions
diff --git a/include/net/tls.h b/include/net/tls.h
index 3aa73e2d8823..1615fb5ea114 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -99,6 +99,7 @@ enum {
*/
struct tls_rec {
struct list_head list;
+ int tx_ready;
int tx_flags;
struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
@@ -128,7 +129,7 @@ struct tls_sw_context_tx {
struct crypto_wait async_wait;
struct tx_work tx_work;
struct tls_rec *open_rec;
- struct list_head tx_ready_list;
+ struct list_head tx_list;
atomic_t encrypt_pending;
int async_notify;
@@ -220,7 +221,6 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- u64 tx_seq_number; /* Next TLS seqnum to be transmitted */
unsigned long flags;
bool in_tcp_sendpages;
@@ -341,21 +341,15 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
-static inline bool is_tx_ready(struct tls_context *tls_ctx,
- struct tls_sw_context_tx *ctx)
+static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
struct tls_rec *rec;
- u64 seq;
- rec = list_first_entry(&ctx->tx_ready_list, struct tls_rec, list);
+ rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
if (!rec)
return false;
- seq = be64_to_cpup((const __be64 *)&rec->aad_space);
- if (seq == tls_ctx->tx_seq_number)
- return true;
- else
- return false;
+ return READ_ONCE(rec->tx_ready);
}
struct sk_buff *