author     Linus Torvalds <torvalds@linux-foundation.org>   2020-01-28 15:38:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-01-28 15:38:56 -0800
commit     a78208e2436963d0b2c7d186277d6e1a9755029a (patch)
tree       090caa51386d811a2750aef3dc70cd247f6aa622 /drivers/crypto/chelsio
parent     68353984d63d8d7ea728819dbdb7aecc5f32d360 (diff)
parent     0bc81767c5bd9d005fae1099fb39eb3688370cb1 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Removed CRYPTO_TFM_RES flags
   - Extended spawn grabbing to all algorithm types
   - Moved hash descsize verification into API code

  Algorithms:
   - Fixed recursive pcrypt dead-lock
   - Added new 32 and 64-bit generic versions of poly1305
   - Added cryptogams implementation of x86/poly1305

  Drivers:
   - Added support for i.MX8M Mini in caam
   - Added support for i.MX8M Nano in caam
   - Added support for i.MX8M Plus in caam
   - Added support for A33 variant of SS in sun4i-ss
   - Added TEE support for Raven Ridge in ccp
   - Added in-kernel API to submit TEE commands in ccp
   - Added AMD-TEE driver
   - Added support for BCM2711 in iproc-rng200
   - Added support for AES256-GCM based ciphers for chtls
   - Added aead support on SEC2 in hisilicon"
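The CRYPTO_TFM_RES removal listed under API above accounts for most of the chelsio churn in the diff below: setkey and setauthsize callbacks no longer flag bad keys by setting CRYPTO_TFM_RES_BAD_KEY_LEN on the tfm (or by copying result flags back from a fallback cipher); they simply return -EINVAL and leave any reporting to the API core. A minimal sketch of the resulting driver-side convention; all foo_* names are hypothetical, not from this tree:

/*
 * Minimal sketch of a driver-side skcipher .setkey under the new convention:
 * a bad key length is reported only through the -EINVAL return value, with
 * no CRYPTO_TFM_RES_BAD_KEY_LEN flag set on the tfm and no result flags
 * copied back from a fallback cipher.  All foo_* names are hypothetical.
 */
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/errno.h>
#include <linux/string.h>

struct foo_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int foo_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;	/* previously this also set CRYPTO_TFM_RES_BAD_KEY_LEN */

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}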
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (244 commits)
crypto: arm/chacha - fix build failured when kernel mode NEON is disabled
crypto: caam - add support for i.MX8M Plus
crypto: x86/poly1305 - emit does base conversion itself
crypto: hisilicon - fix spelling mistake "disgest" -> "digest"
crypto: chacha20poly1305 - add back missing test vectors and test chunking
crypto: x86/poly1305 - fix .gitignore typo
tee: fix memory allocation failure checks on drv_data and amdtee
crypto: ccree - erase unneeded inline funcs
crypto: ccree - make cc_pm_put_suspend() void
crypto: ccree - split overloaded usage of irq field
crypto: ccree - fix PM race condition
crypto: ccree - fix FDE descriptor sequence
crypto: ccree - cc_do_send_request() is void func
crypto: ccree - fix pm wrongful error reporting
crypto: ccree - turn errors to debug msgs
crypto: ccree - fix AEAD decrypt auth fail
crypto: ccree - fix typo in comment
crypto: ccree - fix typos in error msgs
crypto: atmel-{aes,sha,tdes} - Retire crypto_platform_data
crypto: x86/sha - Eliminate casts on asm implementations
...
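One of the entries above touches the chacha20poly1305 test coverage. For orientation only, here is a sketch of the library-style interface (as opposed to the crypto_aead template) that code exercises, assuming the lib/crypto signatures and constants from <crypto/chacha20poly1305.h> in this release; the foo_ helper name is hypothetical:

/*
 * Rough sketch of the chacha20poly1305 library interface: encrypt appends a
 * 16-byte tag, decrypt verifies it and returns false on failure, and the
 * nonce is a plain 64-bit counter.
 */
#include <crypto/chacha20poly1305.h>
#include <linux/string.h>
#include <linux/types.h>

static bool foo_seal_and_reopen(const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	u8 msg[64] = "example plaintext";
	u8 sealed[sizeof(msg) + CHACHA20POLY1305_AUTHTAG_SIZE];
	const u8 ad[4] = { 1, 2, 3, 4 };

	chacha20poly1305_encrypt(sealed, msg, sizeof(msg), ad, sizeof(ad),
				 1 /* nonce */, key);

	/* Decrypt back into msg; a false return means authentication failed. */
	return chacha20poly1305_decrypt(msg, sealed, sizeof(sealed),
					ad, sizeof(ad), 1, key);
}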
Diffstat (limited to 'drivers/crypto/chelsio')
-rw-r--r--  drivers/crypto/chelsio/Kconfig             |  30
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c         |  53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c         |  10
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls.h       |   7
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.c    |  57
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_cm.h    |  21
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_hw.c    |  65
-rw-r--r--  drivers/crypto/chelsio/chtls/chtls_main.c  |  28
8 files changed, 152 insertions, 119 deletions
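Most of the chtls delta below adds TLS_CIPHER_AES_GCM_256 alongside the existing 128-bit path: chtls_key_info() now switches on the cipher type and do_chtls_setsockopt() copies the larger key material. From user space nothing changes except the cipher type handed over through the usual kTLS setsockopt; a rough sketch follows, assuming a connected TCP socket and handshake-derived key material (install_tx_key and the fallback defines are illustrative, not part of any library):

/*
 * User-space sketch: hand an AES-256-GCM TLS 1.2 transmit key to the kernel
 * with the standard kTLS setsockopt.  The key/iv/salt/rec_seq fields must
 * come from the completed TLS handshake; they are left zeroed here purely to
 * keep the example self-contained.  Fallback defines cover older headers.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif
#ifndef TCP_ULP
#define TCP_ULP 31
#endif

static int install_tx_key(int fd)
{
	struct tls12_crypto_info_aes_gcm_256 ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_256;
	/* memcpy(ci.key, key, TLS_CIPHER_AES_GCM_256_KEY_SIZE); etc. */

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}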
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 91e424378217..f078b2686418 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -23,22 +23,22 @@ config CRYPTO_DEV_CHELSIO will be called chcr. config CHELSIO_IPSEC_INLINE - bool "Chelsio IPSec XFRM Tx crypto offload" - depends on CHELSIO_T4 + bool "Chelsio IPSec XFRM Tx crypto offload" + depends on CHELSIO_T4 depends on CRYPTO_DEV_CHELSIO - depends on XFRM_OFFLOAD - depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - default n - ---help--- - Enable support for IPSec Tx Inline. + depends on XFRM_OFFLOAD + depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD + default n + ---help--- + Enable support for IPSec Tx Inline. config CRYPTO_DEV_CHELSIO_TLS - tristate "Chelsio Crypto Inline TLS Driver" - depends on CHELSIO_T4 - depends on TLS_TOE - select CRYPTO_DEV_CHELSIO - ---help--- - Support Chelsio Inline TLS with Chelsio crypto accelerator. + tristate "Chelsio Crypto Inline TLS Driver" + depends on CHELSIO_T4 + depends on TLS_TOE + select CRYPTO_DEV_CHELSIO + ---help--- + Support Chelsio Inline TLS with Chelsio crypto accelerator. - To compile this driver as a module, choose M here: the module - will be called chtls. + To compile this driver as a module, choose M here: the module + will be called chtls. diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 1b4a5664e604..b4b9b22125d1 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -870,20 +870,13 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); - int err = 0; crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= - crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) & - CRYPTO_TFM_RES_MASK; - return err; + return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, @@ -912,7 +905,6 @@ static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -943,7 +935,6 @@ static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -981,7 +972,6 @@ static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -1379,7 +1369,8 @@ static int chcr_device_init(struct chcr_context *ctx) txq_perchan = ntxq / u_ctx->lldi.nchan; spin_lock(&ctx->dev->lock_chcr_dev); ctx->tx_chan_id = ctx->dev->tx_channel_id; - ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; + ctx->dev->tx_channel_id = + (ctx->dev->tx_channel_id + 1) % u_ctx->lldi.nchan; spin_unlock(&ctx->dev->lock_chcr_dev); rxq_idx = ctx->tx_chan_id * rxq_perchan; rxq_idx += id % rxq_perchan; @@ -2173,7 +2164,6 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, 
ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; return 0; badkey_err: - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ablkctx->enckey_len = 0; return err; @@ -3195,9 +3185,6 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) aeadctx->mayverify = VERIFY_SW; break; default: - - crypto_tfm_set_flags((struct crypto_tfm *) tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3222,8 +3209,6 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3264,8 +3249,6 @@ static int chcr_ccm_setauthsize(struct crypto_aead *tfm, aeadctx->mayverify = VERIFY_HW; break; default: - crypto_tfm_set_flags((struct crypto_tfm *)tfm, - CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); @@ -3290,8 +3273,6 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead, ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3314,9 +3295,6 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; return chcr_ccm_common_setkey(aead, key, keylen); @@ -3329,8 +3307,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, int error; if (keylen < 3) { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); aeadctx->enckey_len = 0; return -EINVAL; } @@ -3338,9 +3314,6 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (error) return error; keylen -= 3; @@ -3362,9 +3335,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & - CRYPTO_TFM_RES_MASK); if (ret) goto out; @@ -3380,8 +3350,6 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { - crypto_tfm_set_flags((struct crypto_tfm *)aead, - CRYPTO_TFM_RES_BAD_KEY_LEN); pr_err("GCM: Invalid key length %d\n", keylen); ret = -EINVAL; goto out; @@ -3432,16 +3400,11 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - 
crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } if (get_alg_config(¶m, max_authsize)) { pr_err("chcr : Unsupported digest size\n"); @@ -3562,16 +3525,12 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); - crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK); - crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher) - & CRYPTO_TFM_RES_MASK); if (err) goto out; - if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) { - crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; - } + subtype = get_aead_subtype(authenc); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index 029a7354f541..e937605670ac 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -132,8 +132,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx) static int chcr_dev_move(struct uld_ctx *u_ctx) { - struct adapter *adap; - mutex_lock(&drv_data.drv_mutex); if (drv_data.last_dev == u_ctx) { if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) @@ -146,8 +144,6 @@ static int chcr_dev_move(struct uld_ctx *u_ctx) list_move(&u_ctx->entry, &drv_data.inact_dev); if (list_empty(&drv_data.act_dev)) drv_data.last_dev = NULL; - adap = padap(&u_ctx->dev); - memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); atomic_dec(&drv_data.dev_count); mutex_unlock(&drv_data.drv_mutex); @@ -299,17 +295,21 @@ static int __init chcr_crypto_init(void) static void __exit chcr_crypto_exit(void) { struct uld_ctx *u_ctx, *tmp; + struct adapter *adap; stop_crypto(); - cxgb4_unregister_uld(CXGB4_ULD_CRYPTO); /* Remove all devices from list */ mutex_lock(&drv_data.drv_mutex); list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) { + adap = padap(&u_ctx->dev); + memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) { + adap = padap(&u_ctx->dev); + memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index d2bc655ab931..459442704eb1 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -179,7 +179,10 @@ struct chtls_hws { u32 copied_seq; u64 tx_seq_no; struct tls_scmd scmd; - struct tls12_crypto_info_aes_gcm_128 crypto_info; + union { + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; + struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; + } crypto_info; }; struct chtls_sock { @@ -482,7 +485,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl, void chtls_tcp_push(struct sock *sk, int flags); int chtls_push_frames(struct chtls_sock *csk, int comp); int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val); -int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode); +int chtls_setkey(struct chtls_sock 
*csk, u32 keylen, u32 mode, int cipher_type); void skb_entail(struct sock *sk, struct sk_buff *skb, int flags); unsigned int keyid_to_addr(int start_addr, int keyid); void free_tls_keyid(struct sock *sk); diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index aca75237bbcf..d4674589e1aa 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb) return 0; } +static void chtls_purge_wr_queue(struct sock *sk) +{ + struct sk_buff *skb; + + while ((skb = dequeue_wr(sk)) != NULL) + kfree_skb(skb); +} + static void chtls_release_resources(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); @@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk) kfree_skb(csk->txdata_skb_cache); csk->txdata_skb_cache = NULL; + if (csk->wr_credits != csk->wr_max_credits) { + chtls_purge_wr_queue(sk); + chtls_reset_wr_list(csk); + } + if (csk->l2t_entry) { cxgb4_l2t_release(csk->l2t_entry); csk->l2t_entry = NULL; @@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } + kfree_skb(skb); } static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb) @@ -1815,6 +1829,20 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) kfree_skb(skb); } +/* + * Add an skb to the deferred skb queue for processing from process context. + */ +static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, + defer_handler_t handler) +{ + DEFERRED_SKB_CB(skb)->handler = handler; + spin_lock_bh(&cdev->deferq.lock); + __skb_queue_tail(&cdev->deferq, skb); + if (skb_queue_len(&cdev->deferq) == 1) + schedule_work(&cdev->deferq_task); + spin_unlock_bh(&cdev->deferq.lock); +} + static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, struct chtls_dev *cdev, int status, int queue) { @@ -1829,7 +1857,7 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, if (!reply_skb) { req->status = (queue << 1); - send_defer_abort_rpl(cdev, skb); + t4_defer_reply(skb, cdev, send_defer_abort_rpl); return; } @@ -1848,20 +1876,6 @@ static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); } -/* - * Add an skb to the deferred skb queue for processing from process context. 
- */ -static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, - defer_handler_t handler) -{ - DEFERRED_SKB_CB(skb)->handler = handler; - spin_lock_bh(&cdev->deferq.lock); - __skb_queue_tail(&cdev->deferq, skb); - if (skb_queue_len(&cdev->deferq) == 1) - schedule_work(&cdev->deferq_task); - spin_unlock_bh(&cdev->deferq.lock); -} - static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, struct chtls_dev *cdev, int status, int queue) @@ -2062,19 +2076,6 @@ rel_skb: return 0; } -static struct sk_buff *dequeue_wr(struct sock *sk) -{ - struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); - struct sk_buff *skb = csk->wr_skb_head; - - if (likely(skb)) { - /* Don't bother clearing the tail */ - csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; - WR_SKB_CB(skb)->next_wr = NULL; - } - return skb; -} - static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) { struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h index 129d7ac649a9..3fac0c74a41f 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.h +++ b/drivers/crypto/chelsio/chtls/chtls_cm.h @@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } +static inline void chtls_reset_wr_list(struct chtls_sock *csk) +{ + csk->wr_skb_head = NULL; + csk->wr_skb_tail = NULL; +} + static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) { WR_SKB_CB(skb)->next_wr = NULL; @@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb) WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb; csk->wr_skb_tail = skb; } + +static inline struct sk_buff *dequeue_wr(struct sock *sk) +{ + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); + struct sk_buff *skb = NULL; + + skb = csk->wr_skb_head; + + if (likely(skb)) { + /* Don't bother clearing the tail */ + csk->wr_skb_head = WR_SKB_CB(skb)->next_wr; + WR_SKB_CB(skb)->next_wr = NULL; + } + return skb; +} #endif diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index 2a34035d3cfb..f1820aca0d33 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -208,28 +208,53 @@ static void chtls_rxkey_ivauth(struct _key_ctx *kctx) static int chtls_key_info(struct chtls_sock *csk, struct _key_ctx *kctx, - u32 keylen, u32 optname) + u32 keylen, u32 optname, + int cipher_type) { - unsigned char key[AES_KEYSIZE_128]; - struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; + unsigned char key[AES_MAX_KEY_SIZE]; + unsigned char *key_p, *salt; unsigned char ghash_h[AEAD_H_SIZE]; - int ck_size, key_ctx_size; + int ck_size, key_ctx_size, kctx_mackey_size, salt_size; struct crypto_aes_ctx aes; int ret; - gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *) - &csk->tlshws.crypto_info; - key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + AEAD_H_SIZE; - if (keylen == AES_KEYSIZE_128) { - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; - } else { + /* GCM mode of AES supports 128 and 256 bit encryption, so + * prepare key context base on GCM cipher type + */ + switch (cipher_type) { + case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 = + (struct tls12_crypto_info_aes_gcm_128 *) + &csk->tlshws.crypto_info; + memcpy(key, gcm_ctx_128->key, keylen); + + key_p = gcm_ctx_128->key; + salt = gcm_ctx_128->salt; + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; + salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE; + 
kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; + break; + } + case TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 = + (struct tls12_crypto_info_aes_gcm_256 *) + &csk->tlshws.crypto_info; + memcpy(key, gcm_ctx_256->key, keylen); + + key_p = gcm_ctx_256->key; + salt = gcm_ctx_256->salt; + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; + salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE; + kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; + break; + } + default: pr_err("GCM: Invalid key length %d\n", keylen); return -EINVAL; } - memcpy(key, gcm_ctx->key, keylen); /* Calculate the H = CIPH(K, 0 repeated 16 times). * It will go in key context @@ -249,20 +274,20 @@ static int chtls_key_info(struct chtls_sock *csk, key_ctx = ((key_ctx_size >> 4) << 3); kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size, - CHCR_KEYCTX_MAC_KEY_SIZE_128, + kctx_mackey_size, 0, 0, key_ctx); chtls_rxkey_ivauth(kctx); } else { kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size, - CHCR_KEYCTX_MAC_KEY_SIZE_128, + kctx_mackey_size, 0, 0, key_ctx_size >> 4); } - memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); - memcpy(kctx->key, gcm_ctx->key, keylen); + memcpy(kctx->salt, salt, salt_size); + memcpy(kctx->key, key_p, keylen); memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE); /* erase key info from driver */ - memset(gcm_ctx->key, 0, keylen); + memset(key_p, 0, keylen); return 0; } @@ -288,7 +313,8 @@ static void chtls_set_scmd(struct chtls_sock *csk) SCMD_TLS_FRAG_ENABLE_V(1); } -int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) +int chtls_setkey(struct chtls_sock *csk, u32 keylen, + u32 optname, int cipher_type) { struct tls_key_req *kwr; struct chtls_dev *cdev; @@ -350,9 +376,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM)); kwr->sc_imm.len = cpu_to_be32(klen); + lock_sock(sk); /* key info */ kctx = (struct _key_ctx *)(kwr + 1); - ret = chtls_key_info(csk, kctx, keylen, optname); + ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type); if (ret) goto out_notcb; @@ -388,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname) csk->tlshws.txkey = keyid; } + release_sock(sk); return ret; out_notcb: + release_sock(sk); free_tls_keyid(sk); out_nokey: kfree_skb(skb); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 18996935d8ba..a038de90b2ea 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -84,7 +84,6 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb) static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk) { struct chtls_listen *clisten; - int err; if (sk->sk_protocol != IPPROTO_TCP) return -EPROTONOSUPPORT; @@ -100,10 +99,10 @@ static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk) clisten->cdev = cdev; clisten->sk = sk; mutex_lock(¬ify_mutex); - err = raw_notifier_call_chain(&listen_notify_list, + raw_notifier_call_chain(&listen_notify_list, CHTLS_LISTEN_START, clisten); mutex_unlock(¬ify_mutex); - return err; + return 0; } static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk) @@ -486,6 +485,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, struct tls_crypto_info *crypto_info, tmp_crypto_info; struct chtls_sock *csk; int keylen; + int cipher_type; int rc = 0; csk = rcu_dereference_sk_user_data(sk); @@ -509,6 +509,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, 
crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info; + /* GCM mode of AES supports 128 and 256 bit encryption, so + * copy keys from user based on GCM cipher type. + */ switch (tmp_crypto_info.cipher_type) { case TLS_CIPHER_AES_GCM_128: { /* Obtain version and type from previous copy */ @@ -525,13 +528,30 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE; - rc = chtls_setkey(csk, keylen, optname); + cipher_type = TLS_CIPHER_AES_GCM_128; + break; + } + case TLS_CIPHER_AES_GCM_256: { + crypto_info[0] = tmp_crypto_info; + rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), + optval + sizeof(*crypto_info), + sizeof(struct tls12_crypto_info_aes_gcm_256) + - sizeof(*crypto_info)); + + if (rc) { + rc = -EFAULT; + goto out; + } + + keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE; + cipher_type = TLS_CIPHER_AES_GCM_256; break; } default: rc = -EINVAL; goto out; } + rc = chtls_setkey(csk, keylen, optname, cipher_type); out: return rc; } |
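Apart from the flag cleanup, the chcr_algo.c hunk above also changes Tx channel selection in chcr_device_init() from a two-way toggle (!tx_channel_id) to a round-robin walk over all of the adapter's channels, so adapters with more than two channels no longer leave some of them idle. A stand-alone illustration of the difference, in plain C with hypothetical names:

/*
 * Old scheme: !id only ever alternates between channels 0 and 1.
 * New scheme: advance modulo nchan so every channel is picked in turn.
 */
#include <stdio.h>

static unsigned int next_old, next_new;

static unsigned int pick_old(void)
{
	unsigned int id = next_old;

	next_old = !next_old;
	return id;
}

static unsigned int pick_new(unsigned int nchan)
{
	unsigned int id = next_new;

	next_new = (next_new + 1) % nchan;
	return id;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("ctx %d: old=%u new=%u\n", i, pick_old(), pick_new(4));
	return 0;
}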