Diffstat (limited to 'crypto')
61 files changed, 1152 insertions, 776 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index c24a47406f8f..1b57419fa2e7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -316,7 +316,6 @@ config CRYPTO_AEGIS128 config CRYPTO_AEGIS128_SIMD bool "Support SIMD acceleration for AEGIS-128" depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON) - depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800 default y config CRYPTO_AEGIS128_AESNI_SSE2 @@ -370,7 +369,6 @@ config CRYPTO_CFB config CRYPTO_CTR tristate "CTR support" select CRYPTO_SKCIPHER - select CRYPTO_SEQIV select CRYPTO_MANAGER help CTR: Counter mode @@ -550,7 +548,7 @@ config CRYPTO_XCBC select CRYPTO_MANAGER help XCBC: Keyed-Hashing with encryption algorithm - http://www.ietf.org/rfc/rfc3566.txt + https://www.ietf.org/rfc/rfc3566.txt http://csrc.nist.gov/encryption/modes/proposedmodes/ xcbc-mac/xcbc-mac-spec.pdf @@ -563,7 +561,7 @@ config CRYPTO_VMAC very high speed on 64-bit architectures. See also: - <http://fastcrypto.org/vmac> + <https://fastcrypto.org/vmac> comment "Digest" @@ -818,7 +816,7 @@ config CRYPTO_RMD128 RIPEMD-160 should be used. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" @@ -835,7 +833,7 @@ config CRYPTO_RMD160 against RIPEMD-160. Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD256 tristate "RIPEMD-256 digest algorithm" @@ -847,7 +845,7 @@ config CRYPTO_RMD256 (than RIPEMD-128). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" @@ -859,7 +857,7 @@ config CRYPTO_RMD320 (than RIPEMD-160). Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. - See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html> + See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html> config CRYPTO_SHA1 tristate "SHA1 digest algorithm" @@ -1047,7 +1045,7 @@ config CRYPTO_TGR192 Tiger was developed by Ross Anderson and Eli Biham. See also: - <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>. + <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>. config CRYPTO_WP512 tristate "Whirlpool digest algorithms" @@ -1223,7 +1221,7 @@ config CRYPTO_BLOWFISH designed for use on "large microprocessors". See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_BLOWFISH_COMMON tristate @@ -1232,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON generic c and the assembler implementations. See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_BLOWFISH_X86_64 tristate "Blowfish cipher algorithm (x86_64)" @@ -1247,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64 designed for use on "large microprocessors". See also: - <http://www.schneier.com/blowfish.html> + <https://www.schneier.com/blowfish.html> config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" @@ -1443,10 +1441,10 @@ config CRYPTO_SALSA20 Salsa20 stream cipher algorithm. Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/> + Stream Cipher Project. 
See <https://www.ecrypt.eu.org/stream/> The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html> + Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html> config CRYPTO_CHACHA20 tristate "ChaCha stream cipher algorithms" @@ -1458,7 +1456,7 @@ config CRYPTO_CHACHA20 ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J. Bernstein and further specified in RFC7539 for use in IETF protocols. This is the portable C implementation of ChaCha20. See also: - <http://cr.yp.to/chacha/chacha-20080128.pdf> + <https://cr.yp.to/chacha/chacha-20080128.pdf> XChaCha20 is the application of the XSalsa20 construction to ChaCha20 rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length @@ -1511,7 +1509,7 @@ config CRYPTO_SERPENT variant of Serpent for compatibility with old kerneli.org code. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_SSE2_X86_64 tristate "Serpent cipher algorithm (x86_64/SSE2)" @@ -1530,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 blocks parallel using SSE2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_SSE2_586 tristate "Serpent cipher algorithm (i586/SSE2)" @@ -1549,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586 blocks parallel using SSE2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_AVX_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX)" @@ -1569,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64 eight blocks parallel using the AVX instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" @@ -1585,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 blocks parallel using AVX2 instruction set. See also: - <http://www.cl.cam.ac.uk/~rja14/serpent.html> + <https://www.cl.cam.ac.uk/~rja14/serpent.html> config CRYPTO_SM4 tristate "SM4 cipher algorithm" @@ -1642,7 +1640,7 @@ config CRYPTO_TWOFISH bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_COMMON tristate @@ -1664,7 +1662,7 @@ config CRYPTO_TWOFISH_586 bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_X86_64 tristate "Twofish cipher algorithm (x86_64)" @@ -1680,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64 bits. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_X86_64_3WAY tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" @@ -1701,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY blocks parallel, utilizing resources of out-of-order CPUs better. See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> config CRYPTO_TWOFISH_AVX_X86_64 tristate "Twofish cipher algorithm (x86_64/AVX)" @@ -1724,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 eight blocks parallel using the AVX Instruction Set. 
See also: - <http://www.schneier.com/twofish.html> + <https://www.schneier.com/twofish.html> comment "Compression" @@ -1820,7 +1818,7 @@ config CRYPTO_DRBG_HASH config CRYPTO_DRBG_CTR bool "Enable CTR DRBG" select CRYPTO_AES - depends on CRYPTO_CTR + select CRYPTO_CTR help Enable the CTR DRBG variant as defined in NIST SP800-90A. diff --git a/crypto/acompress.c b/crypto/acompress.c index 84a76723e851..c32c72048a1c 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp); +struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, + u32 mask, int node) +{ + return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask, + node); +} +EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); + struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) { struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); diff --git a/crypto/adiantum.c b/crypto/adiantum.c index cf2b9f4103dd..ce4d5725342c 100644 --- a/crypto/adiantum.c +++ b/crypto/adiantum.c @@ -177,7 +177,7 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key, keyp += NHPOLY1305_KEY_SIZE; WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]); out: - kzfree(data); + kfree_sensitive(data); return err; } @@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg, static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; const char *nhpoly1305_name; struct skcipher_instance *inst; @@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *hash_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags & - CRYPTO_ALG_ASYNC; inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx); inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask | diff --git a/crypto/af_alg.c b/crypto/af_alg.c index b1cd3535c525..8be8bec07cdd 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/net.h> #include <linux/rwsem.h> +#include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/security.h> @@ -128,21 +129,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); - unsigned int nokey = ask->nokey_refcnt; - bool last = nokey && !ask->refcnt; + unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); - local_bh_disable(); - bh_lock_sock(sk); - ask->nokey_refcnt -= nokey; - if (!last) - last = !--ask->refcnt; - bh_unlock_sock(sk); - local_bh_enable(); + if (nokey) + atomic_dec(&ask->nokey_refcnt); - if (last) + if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); @@ -187,7 +182,7 @@ static int 
alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EBUSY; lock_sock(sk); - if (ask->refcnt | ask->nokey_refcnt) + if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); @@ -203,8 +198,7 @@ unlock: return err; } -static int alg_setkey(struct sock *sk, char __user *ukey, - unsigned int keylen) +static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type = ask->type; @@ -216,7 +210,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, return -ENOMEM; err = -EFAULT; - if (copy_from_user(key, ukey, keylen)) + if (copy_from_sockptr(key, ukey, keylen)) goto out; err = type->setkey(ask->private, key, keylen); @@ -228,7 +222,7 @@ out: } static int alg_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -236,7 +230,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, int err = -EBUSY; lock_sock(sk); - if (ask->refcnt) + if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; @@ -301,12 +295,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - if (nokey || !ask->refcnt++) + if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); - ask->nokey_refcnt += nokey; + if (nokey) { + atomic_inc(&ask->nokey_refcnt); + atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); + } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; - alg_sk(sk2)->nokey_refcnt = nokey; newsock->ops = type->ops; newsock->state = SS_CONNECTED; @@ -339,7 +335,6 @@ static const struct proto_ops alg_proto_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, @@ -639,6 +634,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, if (!ctx->used) ctx->merge = 0; + ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); @@ -738,9 +734,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); * * @sk socket of connection to user space * @flags If MSG_DONTWAIT is set, then only report if function would sleep + * @min Set to minimum request size if partial requests are allowed. 
* @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_data(struct sock *sk, unsigned flags) +int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); @@ -758,7 +755,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags) if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; - if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more), + if (sk_wait_event(sk, &timeout, + ctx->init && (!ctx->more || + (min && ctx->used >= min)), &wait)) { err = 0; break; @@ -847,10 +846,17 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (!ctx->more && ctx->used) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } + ctx->init = true; if (init) { ctx->enc = enc; diff --git a/crypto/ahash.c b/crypto/ahash.c index 68a0f0cb75c4..d9d65d1cc669 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -183,7 +183,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = tfm->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -302,7 +302,7 @@ static void ahash_restore_req(struct ahash_request *req, int err) req->priv = NULL; /* Free the req->priv.priv from the ADJUSTED request. */ - kzfree(priv); + kfree_sensitive(priv); } static void ahash_notify_einprogress(struct ahash_request *req) diff --git a/crypto/algapi.c b/crypto/algapi.c index 69605e21af92..fdabf2675b63 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -403,7 +403,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) err = wait_for_completion_killable(&larval->completion); WARN_ON(err); if (!err) - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); + crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); out: crypto_larval_kill(&larval->alg); @@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst, spawn->mask = mask; spawn->next = inst->spawns; inst->spawns = spawn; + inst->alg.cra_flags |= + (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS); err = 0; } up_write(&crypto_alg_sem); @@ -716,17 +718,27 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { - struct crypto_alg *alg; + struct crypto_alg *alg = ERR_PTR(-EAGAIN); + struct crypto_alg *target; + bool shoot = false; down_read(&crypto_alg_sem); - alg = spawn->alg; - if (!spawn->dead && !crypto_mod_get(alg)) { - alg->cra_flags |= CRYPTO_ALG_DYING; - alg = NULL; + if (!spawn->dead) { + alg = spawn->alg; + if (!crypto_mod_get(alg)) { + target = crypto_alg_get(alg); + shoot = true; + alg = ERR_PTR(-EAGAIN); + } } up_read(&crypto_alg_sem); - return alg ?: ERR_PTR(-EAGAIN); + if (shoot) { + crypto_shoot_alg(target); + crypto_alg_put(target); + } + + return alg; } struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, @@ -806,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) } EXPORT_SYMBOL_GPL(crypto_get_attr_type); -int crypto_check_attr_type(struct rtattr **tb, u32 type) +/** + * crypto_check_attr_type() - check algorithm type and compute inherited mask + * @tb: the template parameters + * @type: the algorithm type the template 
would be instantiated as + * @mask_ret: (output) the mask that should be passed to crypto_grab_*() + * to restrict the flags of any inner algorithms + * + * Validate that the algorithm type the user requested is compatible with the + * one the template would actually be instantiated as. E.g., if the user is + * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because + * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm. + * + * Also compute the mask to use to restrict the flags of any inner algorithms. + * + * Return: 0 on success; -errno on failure + */ +int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret) { struct crypto_attr_type *algt; @@ -817,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) if ((algt->type ^ type) & algt->mask) return -EINVAL; + *mask_ret = crypto_algt_inherited_mask(algt); return 0; } EXPORT_SYMBOL_GPL(crypto_check_attr_type); @@ -904,6 +933,14 @@ out: } EXPORT_SYMBOL_GPL(crypto_enqueue_request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + queue->qlen++; + list_add(&request->list, &queue->list); +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request_head); + struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) { struct list_head *request; diff --git a/crypto/algboss.c b/crypto/algboss.c index 535f1f87e6c1..5ebccbd6b74e 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -178,8 +178,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index eb1910b6d434..21efa786f09c 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || ctx->more) { + err = af_alg_wait_for_data(sk, flags, 0); if (err) return err; } @@ -361,11 +361,9 @@ static struct proto_ops algif_aead_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg, @@ -384,7 +382,7 @@ static int aead_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -396,11 +394,8 @@ static int aead_check_key(struct socket *sock) if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -457,11 +452,9 @@ static struct proto_ops algif_aead_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, @@ -561,12 +554,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = 
len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; - ctx->aead_assoclen = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index da1ffa4f7f8d..50f7b22f1b48 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -279,10 +279,8 @@ static struct proto_ops algif_hash_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg, @@ -301,7 +299,7 @@ static int hash_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -313,11 +311,8 @@ static int hash_check_key(struct socket *sock) if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -386,10 +381,8 @@ static struct proto_ops algif_hash_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 22df3799a17b..6300e0566dc5 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -61,7 +61,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; - int err = -EFAULT; + int err; int genlen = 0; u8 result[MAXSIZE]; @@ -101,11 +101,9 @@ static struct proto_ops algif_rng_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .sendmsg = sock_no_sendmsg, .sendpage = sock_no_sendpage, diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index e2c8ab408bed..478f3b8f5bd5 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); + if (!ctx->init || (ctx->more && ctx->used < bs)) { + err = af_alg_wait_for_data(sk, flags, bs); if (err) return err; } @@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. 
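The algif_* changes above rework how AF_ALG sockets track their state (the new ctx->init flag) and how long they wait for data before processing a request. For context, a minimal user-space sketch of the AF_ALG interface these files implement, using the "hash"/"sha256" socket type; this is illustrative only, not part of the patch, and error checking is omitted for brevity:

#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",		/* handled by algif_hash.c */
		.salg_name   = "sha256",	/* any registered hash name */
	};
	unsigned char digest[32];
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	opfd = accept(tfmfd, NULL, 0);		/* af_alg_accept() path */

	write(opfd, "hello", 5);		/* data to be hashed */
	read(opfd, digest, sizeof(digest));	/* digest comes back on read */

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}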
@@ -192,11 +188,9 @@ static struct proto_ops algif_skcipher_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, @@ -215,7 +209,7 @@ static int skcipher_check_key(struct socket *sock) struct alg_sock *ask = alg_sk(sk); lock_sock(sk); - if (ask->refcnt) + if (!atomic_read(&ask->nokey_refcnt)) goto unlock_child; psk = ask->parent; @@ -227,11 +221,8 @@ static int skcipher_check_key(struct socket *sock) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; - if (!pask->refcnt++) - sock_hold(psk); - - ask->refcnt = 1; - sock_put(psk); + atomic_dec(&pask->nokey_refcnt); + atomic_set(&ask->nokey_refcnt, 0); err = 0; @@ -288,11 +279,9 @@ static struct proto_ops algif_skcipher_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, @@ -340,6 +329,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; + memset(ctx, 0, len); ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), GFP_KERNEL); @@ -347,16 +337,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) sock_kfree_s(sk, ctx, len); return -ENOMEM; } - memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; - ctx->used = 0; - atomic_set(&ctx->rcvused, 0); - ctx->more = 0; - ctx->merge = 0; - ctx->enc = 0; crypto_init_wait(&ctx->wait); ask->private = ctx; diff --git a/crypto/api.c b/crypto/api.c index 7d71a9b10e5f..ed08cbd5b9d3 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -333,12 +333,13 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) return len; } -static void crypto_shoot_alg(struct crypto_alg *alg) +void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } +EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) @@ -432,8 +433,9 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend) +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) { char *mem; struct crypto_tfm *tfm = NULL; @@ -444,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc(total, GFP_KERNEL); + mem = kzalloc_node(total, GFP_KERNEL, node); if (mem == NULL) goto out_err; tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; + tfm->node = node; err = frontend->init_tfm(tfm); if (err) @@ -471,7 +474,7 @@ out_err: out: return mem; } -EXPORT_SYMBOL_GPL(crypto_create_tfm); +EXPORT_SYMBOL_GPL(crypto_create_tfm_node); struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, @@ -489,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, EXPORT_SYMBOL_GPL(crypto_find_alg); /* - * crypto_alloc_tfm - Locate algorithm and allocate transform + * crypto_alloc_tfm_node - Locate 
algorithm and allocate transform * @alg_name: Name of algorithm * @frontend: Frontend algorithm type * @type: Type of algorithm * @mask: Mask for type comparison + * @node: NUMA node in which users desire to put requests, if node is + * NUMA_NO_NODE, it means users have no special requirement. * * crypto_alloc_tfm() will first attempt to locate an already loaded * algorithm. If that fails and the kernel supports dynamically loadable @@ -508,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg); * * In case of error the return value is an error pointer. */ -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask) + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node) { void *tfm; int err; @@ -523,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name, goto err; } - tfm = crypto_create_tfm(alg, frontend); + tfm = crypto_create_tfm_node(alg, frontend, node); if (!IS_ERR(tfm)) return tfm; @@ -541,7 +548,7 @@ err: return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node); /* * crypto_destroy_tfm - Free crypto transform @@ -564,7 +571,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); - kzfree(mem); + kfree_sensitive(mem); } EXPORT_SYMBOL_GPL(crypto_destroy_tfm); diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 6e5fc8e31f01..33e77d846caa 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Asymmetric public-key cryptography key type * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925..d8410ffd7f12 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* In-software asymmetric public-key crypto subtype * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index e24a031db1e4..4aff3eebec17 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* Signature verification with an asymmetric key * - * See Documentation/crypto/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.rst * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index cc9dbcecaaca..7553ab18db89 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -376,7 +376,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, } error: - kzfree(desc); + kfree_sensitive(desc); error_no_desc: crypto_free_shash(tfm); kleave(" = %d", ret); @@ -447,6 +447,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen, ret = pefile_digest_pe(pebuf, pelen, &ctx); error: - kzfree(ctx.digest); + kfree_sensitive(ctx.digest); return ret; } diff --git a/crypto/authenc.c b/crypto/authenc.c index 775e7138fd10..670bf1a01d00 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst) static int crypto_authenc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_instance_ctx *ctx; @@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 149b70df2a91..b60e61b1904c 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst) static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct authenc_esn_instance_ctx *ctx; @@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, struct skcipher_alg *enc; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl, enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (auth_base->cra_flags | - enc->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = enc->base.cra_priority * 10 + auth_base->cra_priority; inst->alg.base.cra_blocksize = enc->base.cra_blocksize; diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 1d262374fa4e..a2ffe60e06d3 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -8,7 +8,7 @@ * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : 
https://www.openssl.org/source/license.html - * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about the BLAKE2 hash function can be found at * https://blake2.net. @@ -129,7 +129,9 @@ static void blake2b_compress(struct blake2b_state *S, ROUND(9); ROUND(10); ROUND(11); - +#ifdef CONFIG_CC_IS_CLANG +#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */ +#endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 9a5783e5196a..0b9f409f7370 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -6,7 +6,7 @@ /* * Algorithm Specification - * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html + * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html */ /* diff --git a/crypto/ccm.c b/crypto/ccm.c index d1fb01bbc814..494d70901186 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *mac_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct ccm_instance_ctx *ictx; @@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, struct hash_alg_common *mac; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); if (!inst) @@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, ictx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst), - mac_name, 0, CRYPTO_ALG_ASYNC); + mac_name, 0, mask | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; mac = crypto_spawn_ahash_alg(&ictx->mac); @@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl, mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (mac->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst) static int crypto_rfc4309_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; 
struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index ccaea5cb66d1..97bbb135e9a6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst) static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; @@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, if (ivsize > CHACHAPOLY_IV_SIZE) return -EINVAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (chacha->base.cra_flags | - poly->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; diff --git a/crypto/cmac.c b/crypto/cmac.c index 143a6544c873..df36be1efb81 100644 --- a/crypto/cmac.c +++ b/crypto/cmac.c @@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 283212262adb..a1bea0f4baa8 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) return ictx->queue; } -static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, - u32 *mask) +static void cryptd_type_and_mask(struct crypto_attr_type *algt, + u32 *type, u32 *mask) { - struct crypto_attr_type *algt; + /* + * cryptd is allowed to wrap internal algorithms, but in that case the + * resulting cryptd instance will be marked as internal as well. 
+ */ + *type = algt->type & CRYPTO_ALG_INTERNAL; + *mask = algt->mask & CRYPTO_ALG_INTERNAL; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return; + /* No point in cryptd wrapping an algorithm that's already async. */ + *mask |= CRYPTO_ALG_ASYNC; - *type |= algt->type & CRYPTO_ALG_INTERNAL; - *mask |= algt->mask & CRYPTO_ALG_INTERNAL; + *mask |= crypto_algt_inherited_mask(algt); } static int cryptd_init_instance(struct crypto_instance *inst, @@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst) static int cryptd_create_skcipher(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct skcipherd_instance_ctx *ctx; @@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, u32 mask; int err; - type = 0; - mask = CRYPTO_ALG_ASYNC; - - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); - + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); @@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst) } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct ahash_instance *inst; struct shash_alg *alg; - u32 type = 0; - u32 mask = 0; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL | + inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL| CRYPTO_ALG_OPTIONAL_KEY)); - inst->alg.halg.digestsize = alg->digestsize; inst->alg.halg.statesize = alg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); @@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst) static int cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_attr_type *algt, struct cryptd_queue *queue) { struct aead_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; - u32 type = 0; - u32 mask = CRYPTO_ALG_ASYNC; + u32 type; + u32 mask; int err; - cryptd_check_internal(tb, &type, &mask); + cryptd_type_and_mask(algt, &type, &mask); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | - (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC | + (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); inst->alg.ivsize = crypto_aead_alg_ivsize(alg); @@ -884,11 +885,11 @@ static int cryptd_create(struct 
crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: - return cryptd_create_skcipher(tmpl, tb, &queue); + return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: - return cryptd_create_hash(tmpl, tb, &queue); + return cryptd_create_hash(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_AEAD: - return cryptd_create_aead(tmpl, tb, &queue); + return cryptd_create_aead(tmpl, tb, algt, &queue); } return -EINVAL; diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index eb029ff1e05a..198a8eb1cd56 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -22,32 +22,36 @@ * @err: error number */ static void crypto_finalize_request(struct crypto_engine *engine, - struct crypto_async_request *req, int err) + struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_cur_req = false; + bool finalize_req = false; int ret; struct crypto_engine_ctx *enginectx; - spin_lock_irqsave(&engine->queue_lock, flags); - if (engine->cur_req == req) - finalize_cur_req = true; - spin_unlock_irqrestore(&engine->queue_lock, flags); + /* + * If hardware cannot enqueue more requests + * and retry mechanism is not supported + * make sure we are completing the current request + */ + if (!engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + if (engine->cur_req == req) { + finalize_req = true; + engine->cur_req = NULL; + } + spin_unlock_irqrestore(&engine->queue_lock, flags); + } - if (finalize_cur_req) { + if (finalize_req || engine->retry_support) { enginectx = crypto_tfm_ctx(req->tfm); - if (engine->cur_req_prepared && + if (enginectx->op.prepare_request && enginectx->op.unprepare_request) { ret = enginectx->op.unprepare_request(engine, req); if (ret) dev_err(engine->dev, "failed to unprepare request\n"); } - spin_lock_irqsave(&engine->queue_lock, flags); - engine->cur_req = NULL; - engine->cur_req_prepared = false; - spin_unlock_irqrestore(&engine->queue_lock, flags); } - req->complete(req, err); kthread_queue_work(engine->kworker, &engine->pump_requests); @@ -74,7 +78,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, spin_lock_irqsave(&engine->queue_lock, flags); /* Make sure we are not already running a request */ - if (engine->cur_req) + if (!engine->retry_support && engine->cur_req) goto out; /* If another context is idling then defer */ @@ -108,13 +112,21 @@ static void crypto_pump_requests(struct crypto_engine *engine, goto out; } +start_request: /* Get the fist request from the engine queue to handle */ backlog = crypto_get_backlog(&engine->queue); async_req = crypto_dequeue_request(&engine->queue); if (!async_req) goto out; - engine->cur_req = async_req; + /* + * If hardware doesn't support the retry mechanism, + * keep track of the request we are processing now. + * We'll need it on completion (crypto_finalize_request). 
+ */ + if (!engine->retry_support) + engine->cur_req = async_req; + if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -130,7 +142,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, ret = engine->prepare_crypt_hardware(engine); if (ret) { dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err; + goto req_err_2; } } @@ -141,28 +153,90 @@ static void crypto_pump_requests(struct crypto_engine *engine, if (ret) { dev_err(engine->dev, "failed to prepare request: %d\n", ret); - goto req_err; + goto req_err_2; } - engine->cur_req_prepared = true; } if (!enginectx->op.do_one_request) { dev_err(engine->dev, "failed to do request\n"); ret = -EINVAL; - goto req_err; + goto req_err_1; } + ret = enginectx->op.do_one_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret); - goto req_err; + + /* Request unsuccessfully executed by hardware */ + if (ret < 0) { + /* + * If hardware queue is full (-ENOSPC), requeue request + * regardless of backlog flag. + * Otherwise, unprepare and complete the request. + */ + if (!engine->retry_support || + (ret != -ENOSPC)) { + dev_err(engine->dev, + "Failed to do one request from queue: %d\n", + ret); + goto req_err_1; + } + /* + * If retry mechanism is supported, + * unprepare current request and + * enqueue it back into crypto-engine queue. + */ + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, + async_req); + if (ret) + dev_err(engine->dev, + "failed to unprepare request\n"); + } + spin_lock_irqsave(&engine->queue_lock, flags); + /* + * If hardware was unable to execute request, enqueue it + * back in front of crypto-engine queue, to keep the order + * of requests. + */ + crypto_enqueue_request_head(&engine->queue, async_req); + + kthread_queue_work(engine->kworker, &engine->pump_requests); + goto out; } - return; -req_err: - crypto_finalize_request(engine, async_req, ret); + goto retry; + +req_err_1: + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, async_req); + if (ret) + dev_err(engine->dev, "failed to unprepare request\n"); + } + +req_err_2: + async_req->complete(async_req, ret); + +retry: + /* If retry mechanism is supported, send new requests to engine */ + if (engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + goto start_request; + } return; out: spin_unlock_irqrestore(&engine->queue_lock, flags); + + /* + * Batch requests is possible only if + * hardware can enqueue multiple requests + */ + if (engine->do_batch_requests) { + ret = engine->do_batch_requests(engine); + if (ret) + dev_err(engine->dev, "failed to do batch requests: %d\n", + ret); + } + + return; } static void crypto_pump_work(struct kthread_work *work) @@ -386,17 +460,28 @@ int crypto_engine_stop(struct crypto_engine *engine) EXPORT_SYMBOL_GPL(crypto_engine_stop); /** - * crypto_engine_alloc_init - allocate crypto hardware engine structure and - * initialize it. + * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure + * and initialize it by setting the maximum number of entries in the software + * crypto-engine queue. * @dev: the device attached with one hardware engine + * @retry_support: whether hardware has support for retry mechanism + * @cbk_do_batch: pointer to a callback function to be invoked when executing a + * a batch of requests. + * This has the form: + * callback(struct crypto_engine *engine) + * where: + * @engine: the crypto engine structure. 
* @rt: whether this queue is set to run as a realtime task + * @qlen: maximum size of the crypto-engine queue * * This must be called from context that can sleep. * Return: the crypto engine structure on success, else NULL. */ -struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), + bool rt, int qlen) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; struct crypto_engine *engine; if (!dev) @@ -411,12 +496,18 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) engine->running = false; engine->busy = false; engine->idling = false; - engine->cur_req_prepared = false; + engine->retry_support = retry_support; engine->priv_data = dev; + /* + * Batch requests is possible only if + * hardware has support for retry mechanism. + */ + engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; + snprintf(engine->name, sizeof(engine->name), "%s-engine", dev_name(dev)); - crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); + crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); engine->kworker = kthread_create_worker(0, "%s", engine->name); @@ -428,11 +519,27 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) if (engine->rt) { dev_info(dev, "will run requests pump with realtime priority\n"); - sched_setscheduler(engine->kworker->task, SCHED_FIFO, ¶m); + sched_set_fifo(engine->kworker->task); } return engine; } +EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); + +/** + * crypto_engine_alloc_init - allocate crypto hardware engine structure and + * initialize it. + * @dev: the device attached with one hardware engine + * @rt: whether this queue is set to run as a realtime task + * + * This must be called from context that can sleep. + * Return: the crypto engine structure on success, else NULL. 
+ */ +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +{ + return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, + CRYPTO_ENGINE_MAX_QLEN); +} EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** diff --git a/crypto/ctr.c b/crypto/ctr.c index 31ac4ae598e1..c39fcffba27f 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -256,29 +256,20 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst) static int crypto_rfc3686_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; struct skcipher_instance *inst; struct skcipher_alg *alg; struct crypto_skcipher_spawn *spawn; u32 mask; - int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; - mask = crypto_requires_sync(algt->type, algt->mask) | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), @@ -310,8 +301,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl, inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + diff --git a/crypto/cts.c b/crypto/cts.c index 5e005c4f0221..3766d47ebcc0 100644 --- a/crypto/cts.c +++ b/crypto/cts.c @@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/deflate.c b/crypto/deflate.c index 4c0e6c9d942a..b2a46f6dc961 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -163,7 +163,7 @@ static void __deflate_exit(void *ctx) static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) { __deflate_exit(ctx); - kzfree(ctx); + kfree_sensitive(ctx); } static void deflate_exit(struct crypto_tfm *tfm) diff --git a/crypto/dh.c b/crypto/dh.c index 566f624a2de2..cd4f32092e5c 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -9,6 +9,7 @@ #include <crypto/internal/kpp.h> #include <crypto/kpp.h> #include <crypto/dh.h> +#include <linux/fips.h> #include <linux/mpi.h> struct dh_ctx { @@ -179,6 +180,43 @@ static int dh_compute_value(struct kpp_request *req) if (ret) goto err_free_base; + if (fips_enabled) { + /* SP800-56A rev3 
5.7.1.1 check: Validation of shared secret */ + if (req->src) { + MPI pone; + + /* z <= 1 */ + if (mpi_cmp_ui(val, 1) < 1) { + ret = -EBADMSG; + goto err_free_base; + } + + /* z == p - 1 */ + pone = mpi_alloc(0); + + if (!pone) { + ret = -ENOMEM; + goto err_free_base; + } + + ret = mpi_sub_ui(pone, ctx->p, 1); + if (!ret && !mpi_cmp(pone, val)) + ret = -EBADMSG; + + mpi_free(pone); + + if (ret) + goto err_free_base; + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + } else { + if (dh_is_pubkey_valid(ctx, val)) { + ret = -EAGAIN; + goto err_free_val; + } + } + } + ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; diff --git a/crypto/drbg.c b/crypto/drbg.c index b6929eb5f565..3132967a1749 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1087,10 +1087,6 @@ static void drbg_async_seed(struct work_struct *work) if (ret) goto unlock; - /* If nonblocking pool is initialized, deactivate Jitter RNG */ - crypto_free_rng(drbg->jent); - drbg->jent = NULL; - /* Set seeded to false so that if __drbg_seed fails the * next generate call will trigger a reseed. */ @@ -1168,7 +1164,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, entropylen); if (ret) { pr_devel("DRBG: jent failed with %d\n", ret); - goto out; + + /* + * Do not treat the transient failure of the + * Jitter RNG as an error that needs to be + * reported. The combined number of the + * maximum reseed threshold times the maximum + * number of Jitter RNG transient errors is + * less than the reseed threshold required by + * SP800-90A allowing us to treat the + * transient errors as such. + * + * However, we mandate that at least the first + * seeding operation must succeed with the + * Jitter RNG. + */ + if (!reseed || ret != -EAGAIN) + goto out; } drbg_string_fill(&data1, entropy, entropylen * 2); @@ -1206,19 +1218,19 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; - kzfree(drbg->Vbuf); + kfree_sensitive(drbg->Vbuf); drbg->Vbuf = NULL; drbg->V = NULL; - kzfree(drbg->Cbuf); + kfree_sensitive(drbg->Cbuf); drbg->Cbuf = NULL; drbg->C = NULL; - kzfree(drbg->scratchpadbuf); + kfree_sensitive(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; drbg->d_ops = NULL; drbg->core = NULL; if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { - kzfree(drbg->prev); + kfree_sensitive(drbg->prev); drbg->prev = NULL; drbg->fips_primed = false; } @@ -1294,8 +1306,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), GFP_KERNEL); - if (!drbg->prev) + if (!drbg->prev) { + ret = -ENOMEM; goto fini; + } drbg->fips_primed = false; } @@ -1492,6 +1506,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) if (list_empty(&drbg->test_data.list)) return 0; + drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + INIT_WORK(&drbg->seed_work, drbg_async_seed); drbg->random_ready.owner = THIS_MODULE; @@ -1505,15 +1521,13 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) case -EALREADY: err = 0; - /* fall through */ + fallthrough; default: drbg->random_ready.func = NULL; return err; } - drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); - /* * Require frequent reseeds until the seed source is fully * initialized. 
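The drbg.c hunks above change how the DRBG seeds itself from the Jitter RNG and how transient Jitter RNG failures are treated during reseeding. For reference, a minimal sketch of how a kernel caller might drive the DRBG through the crypto_rng API; "drbg_nopr_ctr_aes256" is one of the standard DRBG registrations, and error handling is reduced to the essentials:

#include <crypto/rng.h>
#include <linux/err.h>

static int get_drbg_bytes(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("drbg_nopr_ctr_aes256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* NULL seed: crypto_rng_reset() pulls seed material from the kernel */
	err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}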
@@ -1617,10 +1631,12 @@ static int drbg_uninstantiate(struct drbg_state *drbg) if (drbg->random_ready.func) { del_random_ready_callback(&drbg->random_ready); cancel_work_sync(&drbg->seed_work); - crypto_free_rng(drbg->jent); - drbg->jent = NULL; } + if (!IS_ERR_OR_NULL(drbg->jent)) + crypto_free_rng(drbg->jent); + drbg->jent = NULL; + if (drbg->d_ops) drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); @@ -1685,7 +1701,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg) struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); - kzfree(sdesc); + kfree_sensitive(sdesc); } drbg->priv_data = NULL; return 0; diff --git a/crypto/ecc.c b/crypto/ecc.c index 02d35be7702b..c80aa25994a0 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -67,7 +67,7 @@ static u64 *ecc_alloc_digits_space(unsigned int ndigits) static void ecc_free_digits_space(u64 *space) { - kzfree(space); + kfree_sensitive(space); } static struct ecc_point *ecc_alloc_point(unsigned int ndigits) @@ -101,9 +101,9 @@ static void ecc_free_point(struct ecc_point *p) if (!p) return; - kzfree(p->x); - kzfree(p->y); - kzfree(p); + kfree_sensitive(p->x); + kfree_sensitive(p->y); + kfree_sensitive(p); } static void vli_clear(u64 *vli, unsigned int ndigits) @@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point) } /* Point multiplication algorithm using Montgomery's ladder with co-Z - * coordinates. From http://eprint.iacr.org/2011/338.pdf + * coordinates. From https://eprint.iacr.org/2011/338.pdf */ /* Double in place */ @@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, } ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits); - if (ecc_point_is_zero(pk)) { + + /* SP800-56A rev 3 5.6.2.1.3 key check */ + if (ecc_is_pubkey_valid_full(curve, pk)) { ret = -EAGAIN; goto err_free_point; } @@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, } EXPORT_SYMBOL(ecc_is_pubkey_valid_partial); +/* SP800-56A section 5.6.2.3.3 full verification */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk) +{ + struct ecc_point *nQ; + + /* Checks 1 through 3 */ + int ret = ecc_is_pubkey_valid_partial(curve, pk); + + if (ret) + return ret; + + /* Check 4: Verify that nQ is the zero point. 
*/ + nQ = ecc_alloc_point(pk->ndigits); + if (!nQ) + return -ENOMEM; + + ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits); + if (!ecc_point_is_zero(nQ)) + ret = -EINVAL; + + ecc_free_point(nQ); + + return ret; +} +EXPORT_SYMBOL(ecc_is_pubkey_valid_full); + int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, const u64 *private_key, const u64 *public_key, u64 *secret) @@ -1495,11 +1524,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, ecc_point_mult(product, pk, priv, rand_z, curve, ndigits); - ecc_swap_digits(product->x, secret, ndigits); - - if (ecc_point_is_zero(product)) + if (ecc_point_is_zero(product)) { ret = -EFAULT; + goto err_validity; + } + + ecc_swap_digits(product->x, secret, ndigits); +err_validity: + memzero_explicit(priv, sizeof(priv)); + memzero_explicit(rand_z, sizeof(rand_z)); ecc_free_point(product); err_alloc_product: ecc_free_point(pk); diff --git a/crypto/ecc.h b/crypto/ecc.h index ab0eb70b9c09..d4e546b9ad79 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -148,6 +148,20 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, struct ecc_point *pk); /** + * ecc_is_pubkey_valid_full() - Full public key validation + * + * @curve: elliptic curve domain parameters + * @pk: public key as a point + * + * Valdiate public key according to SP800-56A section 5.6.2.3.3 ECC Full + * Public-Key Validation Routine. + * + * Return: 0 if validation is successful, -EINVAL if validation is failed. + */ +int ecc_is_pubkey_valid_full(const struct ecc_curve *curve, + struct ecc_point *pk); + +/** * vli_is_zero() - Determine is vli is zero * * @vli: vli to check. diff --git a/crypto/ecdh.c b/crypto/ecdh.c index bd599053a8c4..b0232d6ab4ce 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -124,7 +124,7 @@ static int ecdh_compute_value(struct kpp_request *req) /* fall through */ free_all: - kzfree(shared_secret); + kfree_sensitive(shared_secret); free_pubkey: kfree(public_key); return ret; diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 4a2f02baba14..69686668625e 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c @@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); diff --git a/crypto/essiv.c b/crypto/essiv.c index 465a89c9d1ef..d012be23d496 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -66,7 +66,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - SHASH_DESC_ON_STACK(desc, tctx->hash); u8 salt[HASH_MAX_DIGESTSIZE]; int err; @@ -78,8 +77,7 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, if (err) return err; - desc->tfm = tctx->hash; - err = crypto_shash_digest(desc, key, keylen, salt); + err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt); if (err) return err; @@ -468,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(shash_name); type = algt->type & algt->mask; - mask = crypto_requires_sync(algt->type, algt->mask); + mask = crypto_algt_inherited_mask(algt); switch (type) { case CRYPTO_ALG_TYPE_SKCIPHER: @@ -527,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) /* Synchronous hash, e.g., "sha256" */ _hash_alg = crypto_alg_mod_lookup(shash_name, CRYPTO_ALG_TYPE_SHASH, - CRYPTO_ALG_TYPE_MASK); + 
CRYPTO_ALG_TYPE_MASK | mask); if (IS_ERR(_hash_alg)) { err = PTR_ERR(_hash_alg); goto out_drop_skcipher; @@ -559,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb) hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_hash; - base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC; + /* + * hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its + * flags manually. + */ + base->cra_flags |= (hash_alg->base.cra_flags & + CRYPTO_ALG_INHERITED_FLAGS); base->cra_blocksize = block_base->cra_blocksize; base->cra_ctxsize = sizeof(struct essiv_tfm_ctx); base->cra_alignmask = block_base->cra_alignmask; diff --git a/crypto/gcm.c b/crypto/gcm.c index 0103d28c541e..338ee0769747 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -139,7 +139,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); out: - kzfree(data); + kfree_sensitive(data); return err; } @@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, const char *ctr_name, const char *ghash_name) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; @@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, struct hash_alg_common *ghash; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = (ghash->base.cra_flags | - ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; @@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst) static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; @@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst) static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & 
algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/geniv.c b/crypto/geniv.c index 6a90c52d49ad..bee4621b4f12 100644 --- a/crypto/geniv.c +++ b/crypto/geniv.c @@ -39,22 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst) } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, u32 mask) + struct rtattr **tb) { struct crypto_aead_spawn *spawn; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; + u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) - return ERR_PTR(-EINVAL); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) @@ -62,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, spawn = aead_instance_ctx(inst); - /* Ignore async algorithms if necessary. */ - mask |= crypto_requires_sync(algt->type, algt->mask); - err = crypto_grab_aead(spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), type, mask); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -89,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, CRYPTO_MAX_ALG_NAME) goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index a4b1c026aaee..a69ae3e6c16c 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -304,8 +304,8 @@ void gf128mul_free_64k(struct gf128mul_64k *t) int i; for (i = 0; i < 16; i++) - kzfree(t->t[i]); - kzfree(t); + kfree_sensitive(t->t[i]); + kfree_sensitive(t); } EXPORT_SYMBOL(gf128mul_free_64k); diff --git a/crypto/hmac.c b/crypto/hmac.c index e38bfb948278..25856aa7ccbf 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_shash_spawn *spawn; struct crypto_alg *alg; struct shash_alg *salg; + u32 mask; int err; int ds; int ss; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_shash(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; salg = crypto_spawn_shash_alg(spawn); diff --git a/crypto/internal.h b/crypto/internal.h index d5ebc60c5143..1b92a5a61852 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -65,15 +65,31 @@ void 
crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); +void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); -void *crypto_create_tfm(struct crypto_alg *alg, - const struct crypto_type *frontend); +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, int node); + +static inline void *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE); +} + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -void *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, u32 type, u32 mask); + +void *crypto_alloc_tfm_node(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask, + int node); + +static inline void *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, u32 type, u32 mask) +{ + return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE); +} int crypto_probing_notify(unsigned long val, void *v); diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index a5ce8f96790f..eb7d1dd506bf 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -57,7 +57,7 @@ void *jent_zalloc(unsigned int len) void jent_zfree(void *ptr) { - kzfree(ptr); + kfree_sensitive(ptr); } int jent_fips_enabled(void) @@ -108,6 +108,7 @@ void jent_get_nstime(__u64 *out) struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; + unsigned int reset_cnt; }; static int jent_kcapi_init(struct crypto_tfm *tfm) @@ -142,7 +143,33 @@ static int jent_kcapi_random(struct crypto_rng *tfm, int ret = 0; spin_lock(&rng->jent_lock); + + /* Return a permanent error in case we had too many resets in a row. */ + if (rng->reset_cnt > (1<<10)) { + ret = -EFAULT; + goto out; + } + ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); + + /* Reset RNG in case of health failures */ + if (ret < -1) { + pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n", + (ret == -2) ? "Repetition Count Test" : + "Adaptive Proportion Test"); + + rng->reset_cnt++; + + ret = -EAGAIN; + } else { + rng->reset_cnt = 0; + + /* Convert the Jitter RNG error into a usable error code */ + if (ret == -1) + ret = -EINVAL; + } + +out: spin_unlock(&rng->jent_lock); return ret; diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 042157f0d28b..6e147c43fc18 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -2,12 +2,12 @@ * Non-physical true random number generator based on timing jitter -- * Jitter RNG standalone code. 
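With the reset handling added to jent_kcapi_random() above, a user of the "jitterentropy_rng" instance now sees -EAGAIN for a transient, health-test triggered reset and -EFAULT once too many resets occur back to back. A minimal caller-side sketch -- the helper name and the retry budget are arbitrary illustrative choices, not something the patch prescribes:

#include <crypto/rng.h>
#include <linux/err.h>

static int jent_get_bytes_retry(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret, tries = 3;	/* arbitrary retry budget */

	rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/*
	 * -EAGAIN marks a health-test reset and is worth retrying;
	 * any other error (including the permanent -EFAULT) is final.
	 */
	do {
		ret = crypto_rng_get_bytes(rng, buf, len);
	} while (ret == -EAGAIN && --tries);

	crypto_free_rng(rng);
	return ret;
}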
* - * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019 + * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2020 * * Design * ====== * - * See http://www.chronox.de/jent.html + * See https://www.chronox.de/jent.html * * License * ======= @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.1.2 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at https://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ @@ -83,6 +83,22 @@ struct rand_data { unsigned int memblocksize; /* Size of one memory block in bytes */ unsigned int memaccessloops; /* Number of memory accesses per random * bit generation */ + + /* Repetition Count Test */ + int rct_count; /* Number of stuck values */ + + /* Adaptive Proportion Test for a significance level of 2^-30 */ +#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ +#define JENT_APT_WINDOW_SIZE 512 /* Data window size */ + /* LSB of time stamp to process */ +#define JENT_APT_LSB 16 +#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1) + unsigned int apt_observations; /* Number of collected observations */ + unsigned int apt_count; /* APT counter */ + unsigned int apt_base; /* APT base reference */ + unsigned int apt_base_set:1; /* APT base reference set? */ + + unsigned int health_failure:1; /* Permanent health failure */ }; /* Flags that can be used to initialize the RNG */ @@ -98,12 +114,201 @@ struct rand_data { * variations (2nd derivation of time is * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. */ +#define JENT_EHEALTH 9 /* Health test failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ + +#include "jitterentropy.h" /*************************************************************************** - * Helper functions + * Adaptive Proportion Test + * + * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ -#include "jitterentropy.h" +/** + * Reset the APT counter + * + * @ec [in] Reference to entropy collector + */ +static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked) +{ + /* Reset APT counter */ + ec->apt_count = 0; + ec->apt_base = delta_masked; + ec->apt_observations = 0; +} + +/** + * Insert a new entropy event into APT + * + * @ec [in] Reference to entropy collector + * @delta_masked [in] Masked time delta to process + */ +static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) +{ + /* Initialize the base reference */ + if (!ec->apt_base_set) { + ec->apt_base = delta_masked; + ec->apt_base_set = 1; + return; + } + + if (delta_masked == ec->apt_base) { + ec->apt_count++; + + if (ec->apt_count >= JENT_APT_CUTOFF) + ec->health_failure = 1; + } + + ec->apt_observations++; + + if (ec->apt_observations >= JENT_APT_WINDOW_SIZE) + jent_apt_reset(ec, delta_masked); +} + +/*************************************************************************** + * Stuck Test and its use as Repetition Count Test + * + * The Jitter RNG uses an enhanced version of the Repetition Count Test + * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical + * back-to-back values, the input to the RCT is the counting of the stuck + * values during the generation of one Jitter RNG output block. + * + * The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8. + * + * During the counting operation, the Jitter RNG always calculates the RCT + * cut-off value of C. 
If that value exceeds the allowed cut-off value, + * the Jitter RNG output block will be calculated completely but discarded at + * the end. The caller of the Jitter RNG is informed with an error code. + ***************************************************************************/ + +/** + * Repetition Count Test as defined in SP800-90B section 4.4.1 + * + * @ec [in] Reference to entropy collector + * @stuck [in] Indicator whether the value is stuck + */ +static void jent_rct_insert(struct rand_data *ec, int stuck) +{ + /* + * If we have a count less than zero, a previous RCT round identified + * a failure. We will not overwrite it. + */ + if (ec->rct_count < 0) + return; + + if (stuck) { + ec->rct_count++; + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8. + * In addition, we require an entropy value H of 1/OSR as this + * is the minimum entropy required to provide full entropy. + * Note, we collect 64 * OSR deltas for inserting them into + * the entropy pool which should then have (close to) 64 bits + * of entropy. + * + * Note, ec->rct_count (which equals to value B in the pseudo + * code of SP800-90B section 4.4.1) starts with zero. Hence + * we need to subtract one from the cutoff value as calculated + * following SP800-90B. + */ + if ((unsigned int)ec->rct_count >= (31 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure = 1; + } + } else { + ec->rct_count = 0; + } +} + +/** + * Is there an RCT health test failure? + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_rct_failure(struct rand_data *ec) +{ + if (ec->rct_count < 0) + return 1; + return 0; +} + +static inline __u64 jent_delta(__u64 prev, __u64 next) +{ +#define JENT_UINT64_MAX (__u64)(~((__u64) 0)) + return (prev < next) ? (next - prev) : + (JENT_UINT64_MAX - prev + 1 + next); +} + +/** + * Stuck test by checking the: + * 1st derivative of the jitter measurement (time delta) + * 2nd derivative of the jitter measurement (delta of time deltas) + * 3rd derivative of the jitter measurement (delta of delta of time deltas) + * + * All values must always be non-zero. + * + * @ec [in] Reference to entropy collector + * @current_delta [in] Jitter time delta + * + * @return + * 0 jitter measurement not stuck (good bit) + * 1 jitter measurement stuck (reject bit) + */ +static int jent_stuck(struct rand_data *ec, __u64 current_delta) +{ + __u64 delta2 = jent_delta(ec->last_delta, current_delta); + __u64 delta3 = jent_delta(ec->last_delta2, delta2); + unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; + + ec->last_delta = current_delta; + ec->last_delta2 = delta2; + + /* + * Insert the result of the comparison of two back-to-back time + * deltas. 
+ */ + jent_apt_insert(ec, delta_masked); + + if (!current_delta || !delta2 || !delta3) { + /* RCT with a stuck bit */ + jent_rct_insert(ec, 1); + return 1; + } + + /* RCT with a non-stuck bit */ + jent_rct_insert(ec, 0); + + return 0; +} + +/** + * Report any health test failures + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_health_failure(struct rand_data *ec) +{ + /* Test is only enabled in FIPS mode */ + if (!jent_fips_enabled()) + return 0; + + return ec->health_failure; +} + +/*************************************************************************** + * Noise sources + ***************************************************************************/ /** * Update of the loop count used for the next round of @@ -148,10 +353,6 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, return (shuffle + (1<<min)); } -/*************************************************************************** - * Noise sources - ***************************************************************************/ - /** * CPU Jitter noise source -- this is the noise source based on the CPU * execution time jitter @@ -166,18 +367,19 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, * the CPU execution time jitter. Any change to the loop in this function * implies that careful retesting must be done. * - * Input: - * @ec entropy collector struct - * @time time stamp to be injected - * @loop_cnt if a value not equal to 0 is set, use the given value as number of - * loops to perform the folding + * @ec [in] entropy collector struct + * @time [in] time stamp to be injected + * @loop_cnt [in] if a value not equal to 0 is set, use the given value as + * number of loops to perform the folding + * @stuck [in] Is the time stamp identified as stuck? * * Output: * updated ec->data * * @return Number of loops the folding operation is performed */ -static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) +static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, + int stuck) { unsigned int i; __u64 j = 0; @@ -220,9 +422,17 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) new ^= tmp; } } - ec->data = new; - return fold_loop_cnt; + /* + * If the time stamp is stuck, do not finally insert the value into + * the entropy pool. Although this operation should not do any harm + * even when the time stamp has no entropy, SP800-90B requires that + * any conditioning operation (SP800-90B considers the LFSR to be a + * conditioning operation) to have an identical amount of input + * data according to section 3.1.5. + */ + if (!stuck) + ec->data = new; } /** @@ -243,16 +453,13 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) * to reliably access either L3 or memory, the ec->mem memory must be quite * large which is usually not desirable. 
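The jent_delta() helper introduced above is what keeps the stuck test and the APT meaningful when the 64-bit time stamp wraps around. A quick user-space check with made-up values (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint64_t jent_delta(uint64_t prev, uint64_t next)
{
	return (prev < next) ? (next - prev)
			     : (UINT64_MAX - prev + 1 + next);
}

int main(void)
{
	/* prev is 6 ticks before the wrap, next is 10 ticks after it */
	uint64_t prev = UINT64_MAX - 5, next = 10;

	/* prints 16: the elapsed ticks across the wrap */
	printf("%llu\n", (unsigned long long)jent_delta(prev, next));
	return 0;
}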
* - * Input: - * @ec Reference to the entropy collector with the memory access data -- if - * the reference to the memory block to be accessed is NULL, this noise - * source is disabled - * @loop_cnt if a value not equal to 0 is set, use the given value as number of - * loops to perform the folding - * - * @return Number of memory access operations + * @ec [in] Reference to the entropy collector with the memory access data -- if + * the reference to the memory block to be accessed is NULL, this noise + * source is disabled + * @loop_cnt [in] if a value not equal to 0 is set, use the given value + * number of loops to perform the LFSR */ -static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) +static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) { unsigned int wrap = 0; __u64 i = 0; @@ -262,7 +469,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); if (NULL == ec || NULL == ec->mem) - return 0; + return; wrap = ec->memblocksize * ec->memblocks; /* @@ -288,43 +495,11 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) ec->memlocation = ec->memlocation + ec->memblocksize - 1; ec->memlocation = ec->memlocation % wrap; } - return i; } /*************************************************************************** * Start of entropy processing logic ***************************************************************************/ - -/** - * Stuck test by checking the: - * 1st derivation of the jitter measurement (time delta) - * 2nd derivation of the jitter measurement (delta of time deltas) - * 3rd derivation of the jitter measurement (delta of delta of time deltas) - * - * All values must always be non-zero. - * - * Input: - * @ec Reference to entropy collector - * @current_delta Jitter time delta - * - * @return - * 0 jitter measurement not stuck (good bit) - * 1 jitter measurement stuck (reject bit) - */ -static int jent_stuck(struct rand_data *ec, __u64 current_delta) -{ - __s64 delta2 = ec->last_delta - current_delta; - __s64 delta3 = delta2 - ec->last_delta2; - - ec->last_delta = current_delta; - ec->last_delta2 = delta2; - - if (!current_delta || !delta2 || !delta3) - return 1; - - return 0; -} - /** * This is the heart of the entropy generation: calculate time deltas and * use the CPU jitter in the time deltas. The jitter is injected into the @@ -334,8 +509,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * of this function! This can be done by calling this function * and not using its result. * - * Input: - * @entropy_collector Reference to entropy collector + * @ec [in] Reference to entropy collector * * @return result of stuck test */ @@ -343,6 +517,7 @@ static int jent_measure_jitter(struct rand_data *ec) { __u64 time = 0; __u64 current_delta = 0; + int stuck; /* Invoke one noise source before time measurement to add variations */ jent_memaccess(ec, 0); @@ -352,22 +527,23 @@ static int jent_measure_jitter(struct rand_data *ec) * invocation to measure the timing variations */ jent_get_nstime(&time); - current_delta = time - ec->prev_time; + current_delta = jent_delta(ec->prev_time, time); ec->prev_time = time; + /* Check whether we have a stuck measurement. */ + stuck = jent_stuck(ec, current_delta); + /* Now call the next noise sources which also injects the data */ - jent_lfsr_time(ec, current_delta, 0); + jent_lfsr_time(ec, current_delta, 0, stuck); - /* Check whether we have a stuck measurement. 
*/ - return jent_stuck(ec, current_delta); + return stuck; } /** * Generator of one 64 bit random number * Function fills rand_data->data * - * Input: - * @ec Reference to entropy collector + * @ec [in] Reference to entropy collector */ static void jent_gen_entropy(struct rand_data *ec) { @@ -391,31 +567,6 @@ static void jent_gen_entropy(struct rand_data *ec) } /** - * The continuous test required by FIPS 140-2 -- the function automatically - * primes the test if needed. - * - * Return: - * returns normally if FIPS test passed - * panics the kernel if FIPS test failed - */ -static void jent_fips_test(struct rand_data *ec) -{ - if (!jent_fips_enabled()) - return; - - /* prime the FIPS test */ - if (!ec->old_data) { - ec->old_data = ec->data; - jent_gen_entropy(ec); - } - - if (ec->data == ec->old_data) - jent_panic("jitterentropy: Duplicate output detected\n"); - - ec->old_data = ec->data; -} - -/** * Entry function: Obtain entropy for the caller. * * This function invokes the entropy gathering logic as often to generate @@ -425,17 +576,18 @@ static void jent_fips_test(struct rand_data *ec) * This function truncates the last 64 bit entropy value output to the exact * size specified by the caller. * - * Input: - * @ec Reference to entropy collector - * @data pointer to buffer for storing random data -- buffer must already - * exist - * @len size of the buffer, specifying also the requested number of random - * in bytes + * @ec [in] Reference to entropy collector + * @data [in] pointer to buffer for storing random data -- buffer must already + * exist + * @len [in] size of the buffer, specifying also the requested number of random + * in bytes * * @return 0 when request is fulfilled or an error * * The following error codes can occur: * -1 entropy_collector is NULL + * -2 RCT failed + * -3 APT test failed */ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len) @@ -449,7 +601,42 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int tocopy; jent_gen_entropy(ec); - jent_fips_test(ec); + + if (jent_health_failure(ec)) { + int ret; + + if (jent_rct_failure(ec)) + ret = -2; + else + ret = -3; + + /* + * Re-initialize the noise source + * + * If the health test fails, the Jitter RNG remains + * in failure state and will return a health failure + * during next invocation. + */ + if (jent_entropy_init()) + return ret; + + /* Set APT to initial state */ + jent_apt_reset(ec, 0); + ec->apt_base_set = 0; + + /* Set RCT to initial state */ + ec->rct_count = 0; + + /* Re-enable Jitter RNG */ + ec->health_failure = 0; + + /* + * Return the health test failure status to the + * caller as the generated value is not appropriate. + */ + return ret; + } + if ((DATA_SIZE_BITS / 8) < len) tocopy = (DATA_SIZE_BITS / 8); else @@ -513,11 +700,15 @@ int jent_entropy_init(void) int i; __u64 delta_sum = 0; __u64 old_delta = 0; + unsigned int nonstuck = 0; int time_backwards = 0; int count_mod = 0; int count_stuck = 0; struct rand_data ec = { 0 }; + /* Required for RCT */ + ec.osr = 1; + /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. These * loop counts may show some slight skew and we produce @@ -539,8 +730,10 @@ int jent_entropy_init(void) /* * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is * definitely too little. + * + * SP800-90B requires at least 1024 initial test cycles. 
*/ -#define TESTLOOPCOUNT 300 +#define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { __u64 time = 0; @@ -552,13 +745,13 @@ int jent_entropy_init(void) /* Invoke core entropy collection logic */ jent_get_nstime(&time); ec.prev_time = time; - jent_lfsr_time(&ec, time, 0); + jent_lfsr_time(&ec, time, 0, 0); jent_get_nstime(&time2); /* test whether timer works */ if (!time || !time2) return JENT_ENOTIME; - delta = time2 - time; + delta = jent_delta(time, time2); /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this @@ -581,6 +774,28 @@ int jent_entropy_init(void) if (stuck) count_stuck++; + else { + nonstuck++; + + /* + * Ensure that the APT succeeded. + * + * With the check below that count_stuck must be less + * than 10% of the overall generated raw entropy values + * it is guaranteed that the APT is invoked at + * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. + */ + if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { + jent_apt_reset(&ec, + delta & JENT_APT_WORD_MASK); + if (jent_health_failure(&ec)) + return JENT_EHEALTH; + } + } + + /* Validate RCT */ + if (jent_rct_failure(&ec)) + return JENT_ERCT; /* test whether we have an increasing timer */ if (!(time2 > time)) diff --git a/crypto/lrw.c b/crypto/lrw.c index 3c734b81b3a2..bcf09fbc750a 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -9,7 +9,7 @@ */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at - * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html + * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ @@ -27,7 +27,7 @@ #define LRW_BLOCK_SIZE 16 -struct priv { +struct lrw_tfm_ctx { struct crypto_skcipher *child; /* @@ -49,12 +49,12 @@ struct priv { be128 mulinc[128]; }; -struct rctx { +struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; -static inline void setbit128_bbe(void *b, int bit) +static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN @@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit) ), b); } -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; @@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, /* initialize optimization table */ for (i = 0; i < 128; i++) { - setbit128_bbe(&tmp, i); + lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } @@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; - * int i = next_index(&counter); + * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ -static int next_index(u32 *counter) +static int lrw_next_index(u32 *counter) { int i, res = 0; @@ -135,14 +135,14 @@ static int next_index(u32 *counter) * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple 
calls to the 'ecb(..)' instance, which usually would be slower than - * just doing the next_index() calls again. + * just doing the lrw_next_index() calls again. */ -static int xor_tweak(struct skcipher_request *req, bool second_pass) +static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct priv *ctx = crypto_skcipher_ctx(tfm); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; @@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) /* T <- I*Key2, using the optimization * discussed in the specification */ - be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]); + be128_xor(&t, &t, + &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { @@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass) return err; } -static int xor_tweak_pre(struct skcipher_request *req) +static int lrw_xor_tweak_pre(struct skcipher_request *req) { - return xor_tweak(req, false); + return lrw_xor_tweak(req, false); } -static int xor_tweak_post(struct skcipher_request *req) +static int lrw_xor_tweak_post(struct skcipher_request *req) { - return xor_tweak(req, true); + return lrw_xor_tweak(req, true); } -static void crypt_done(struct crypto_async_request *areq, int err) +static void lrw_crypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req); + err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } -static void init_crypt(struct skcipher_request *req) +static void lrw_init_crypt(struct skcipher_request *req) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct lrw_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req); + skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, + req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); @@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req) gf128mul_64k_bbe(&rctx->t, ctx->table); } -static int encrypt(struct skcipher_request *req) +static int lrw_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int decrypt(struct skcipher_request *req) +static int lrw_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct 
skcipher_request *subreq = &rctx->subreq; - init_crypt(req); - return xor_tweak_pre(req) ?: + lrw_init_crypt(req); + return lrw_xor_tweak_pre(req) ?: crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req); + lrw_xor_tweak_post(req); } -static int init_tfm(struct crypto_skcipher *tfm) +static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); @@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + - sizeof(struct rctx)); + sizeof(struct lrw_request_ctx)); return 0; } -static void exit_tfm(struct crypto_skcipher *tfm) +static void lrw_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } -static void free_inst(struct skcipher_instance *inst) +static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct skcipher_alg *alg; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -379,7 +376,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | @@ -391,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + LRW_BLOCK_SIZE; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = lrw_init_tfm; + inst->alg.exit = lrw_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = lrw_setkey; + inst->alg.encrypt = lrw_encrypt; + inst->alg.decrypt = lrw_decrypt; - inst->free = free_inst; + inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - free_inst(inst); + lrw_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template lrw_tmpl = { .name = "lrw", - .create = create, + .create = lrw_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init lrw_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return 
crypto_register_template(&lrw_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit lrw_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&lrw_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(lrw_module_init); +module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 8bddc65cd509..d569c7ed6c80 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst, } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, - u32 type, u32 mask) + struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; - struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; + u32 mask = crypto_algt_inherited_mask(algt); int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; @@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; @@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto err_free_inst; - inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; + inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); @@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: - return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); + return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; @@ -320,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; - *pinst = padata_alloc_possible(name); + *pinst = padata_alloc(name); if (!*pinst) return ret; @@ -331,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) return ret; } -static void pcrypt_fini_padata(struct padata_instance *pinst) -{ - padata_stop(pinst); - padata_free(pinst); -} - static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, @@ -359,13 +349,10 @@ static int __init pcrypt_init(void) if (err) goto err_deinit_pencrypt; - padata_start(pencrypt); - padata_start(pdecrypt); - return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: - pcrypt_fini_padata(pencrypt); + padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: @@ -376,8 +363,8 @@ static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); - pcrypt_fini_padata(pencrypt); - pcrypt_fini_padata(pdecrypt); + padata_free(pencrypt); + padata_free(pdecrypt); kset_unregister(pcrypt_kset); } diff --git a/crypto/rng.c b/crypto/rng.c index 1490d210f1a1..a888d84b524a 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -53,7 +53,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); crypto_stats_rng_seed(alg, err); out: - kzfree(buf); + kfree_sensitive(buf); return err; } EXPORT_SYMBOL_GPL(crypto_rng_reset); diff --git 
a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index d31031de51bc..ddd3d10ffc15 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -199,7 +199,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, ctx->key_size), out_buf, ctx->key_size); - kzfree(out_buf); + kfree_sensitive(out_buf); out: req->dst_len = ctx->key_size; @@ -322,7 +322,7 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) out_buf + pos, req->dst_len); done: - kzfree(req_ctx->out_buf); + kfree_sensitive(req_ctx->out_buf); return err; } @@ -500,7 +500,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) req->dst_len) != 0) err = -EKEYREJECTED; done: - kzfree(req_ctx->out_buf); + kfree_sensitive(req_ctx->out_buf); return err; } @@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst) static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct akcipher_instance *inst; struct pkcs1pad_inst_ctx *ctx; @@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) const char *hash_name; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) - return -EINVAL; - - mask = crypto_requires_sync(algt->type, algt->mask); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); + if (err) + return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) @@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) goto err_free_inst; } - inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = rsa_alg->base.cra_priority; inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index c81a44404086..3418869dabef 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -9,8 +9,8 @@ * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream * Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>. 
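pkcs1pad_create() above is one of many *_create() functions converted in this series: the open-coded crypto_get_attr_type() lookup, the type/mask comparison and crypto_requires_sync() are folded into a single crypto_check_attr_type() call that also returns the mask to inherit. A condensed sketch of the resulting shape for a hypothetical skcipher template (instance naming, key sizes and callbacks are elided; real templates also unwind through their free routine rather than a bare kfree()):

#include <crypto/internal/skcipher.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_skcipher_spawn *spawn;
	u32 mask;
	int err;

	/* replaces crypto_get_attr_type() + type check + crypto_requires_sync() */
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = skcipher_instance_ctx(inst);

	/*
	 * Passing the inherited mask to crypto_grab_*() is what lets the
	 * manual "cra_flags = ... & CRYPTO_ALG_ASYNC" assignments go away:
	 * flags in CRYPTO_ALG_INHERITED_FLAGS are propagated automatically
	 * for algorithms obtained this way (the essiv hunk above documents
	 * the one case where that still has to be done by hand).
	 */
	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err) {
		kfree(inst);
		return err;
	}

	/* ... remaining instance setup elided ... */

	return skcipher_register_instance(tmpl, inst);
}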
* More information about eSTREAM and Salsa20 can be found here: - * http://www.ecrypt.eu.org/stream/ - * http://cr.yp.to/snuffle.html + * https://www.ecrypt.eu.org/stream/ + * https://cr.yp.to/snuffle.html * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/crypto/seqiv.c b/crypto/seqiv.c index f124b9b54e15..0899d527c284 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -33,7 +33,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv)); out: - kzfree(subreq->iv); + kfree_sensitive(subreq->iv); } static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, @@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) struct aead_instance *inst; int err; - inst = aead_geniv_alloc(tmpl, tb, 0, 0); + inst = aead_geniv_alloc(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); @@ -164,23 +164,9 @@ free_inst: return err; } -static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_attr_type *algt; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - return -EINVAL; - - return seqiv_aead_create(tmpl, tb); -} - static struct crypto_template seqiv_tmpl = { .name = "seqiv", - .create = seqiv_create, + .create = seqiv_aead_create, .module = THIS_MODULE, }; diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 7c57b844c382..1d43472fecbd 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <linux/cryptohash.h> #include <linux/types.h> #include <crypto/sha.h> #include <crypto/sha1_base.h> @@ -31,10 +30,10 @@ EXPORT_SYMBOL_GPL(sha1_zero_message_hash); static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) { - u32 temp[SHA_WORKSPACE_WORDS]; + u32 temp[SHA1_WORKSPACE_WORDS]; while (blocks--) { - sha_transform(sst->state, src, temp); + sha1_transform(sst->state, src, temp); src += SHA1_BLOCK_SIZE; } memzero_explicit(temp, sizeof(temp)); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index f2d7095d4f2d..88156e3e2a33 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -35,27 +35,31 @@ EXPORT_SYMBOL_GPL(sha256_zero_message_hash); static int crypto_sha256_init(struct shash_desc *desc) { - return sha256_init(shash_desc_ctx(desc)); + sha256_init(shash_desc_ctx(desc)); + return 0; } static int crypto_sha224_init(struct shash_desc *desc) { - return sha224_init(shash_desc_ctx(desc)); + sha224_init(shash_desc_ctx(desc)); + return 0; } int crypto_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha256_update(shash_desc_ctx(desc), data, len); + sha256_update(shash_desc_ctx(desc), data, len); + return 0; } EXPORT_SYMBOL(crypto_sha256_update); static int crypto_sha256_final(struct shash_desc *desc, u8 *out) { if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) - return sha224_final(shash_desc_ctx(desc), out); + sha224_final(shash_desc_ctx(desc), out); else - return sha256_final(shash_desc_ctx(desc), out); + sha256_final(shash_desc_ctx(desc), out); + return 0; } int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 44e263e25599..3e4069935b53 100644 --- 
a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -3,7 +3,7 @@ * Cryptographic API. * * SHA-3, as specified in - * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf * * SHA-3 code by Jeff Garzik <jeff@garzik.org> * Ard Biesheuvel <ard.biesheuvel@linaro.org> diff --git a/crypto/shash.c b/crypto/shash.c index c075b26c2a1d..2e3433ad9762 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -44,7 +44,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); err = shash->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return err; } @@ -206,6 +206,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + SHASH_DESC_ON_STACK(desc, tfm); + int err; + + desc->tfm = tfm; + + err = crypto_shash_digest(desc, data, len, out); + + shash_desc_zero(desc); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); + static int shash_default_export(struct shash_desc *desc, void *out) { memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); diff --git a/crypto/simd.c b/crypto/simd.c index 56885af49c24..edaa479a1ec5 100644 --- a/crypto/simd.c +++ b/crypto/simd.c @@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; @@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, drvname) >= CRYPTO_MAX_ALG_NAME) goto out_free_salg; - alg->base.cra_flags = CRYPTO_ALG_ASYNC; + alg->base.cra_flags = CRYPTO_ALG_ASYNC | + (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS); alg->base.cra_priority = ialg->base.cra_priority; alg->base.cra_blocksize = ialg->base.cra_blocksize; alg->base.cra_alignmask = ialg->base.cra_alignmask; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7221def7b9a7..b4dae640de9f 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -592,7 +592,7 @@ static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cipher->setkey(tfm, alignbuffer, keylen); - kzfree(buffer); + kfree_sensitive(buffer); return ret; } @@ -934,22 +934,15 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst) struct skcipher_instance *skcipher_alloc_instance_simple( struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_attr_type *algt; u32 mask; struct skcipher_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *cipher_alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return ERR_PTR(-EINVAL); - - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) diff 
--git a/crypto/tcrypt.c b/crypto/tcrypt.c index ba0b7702f2e9..12e82a61b896 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2348,121 +2348,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 301: test_hash_speed("md4", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 302: test_hash_speed("md5", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 303: test_hash_speed("sha1", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 304: test_hash_speed("sha256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 305: test_hash_speed("sha384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 306: test_hash_speed("sha512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 307: test_hash_speed("wp256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 308: test_hash_speed("wp384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 309: test_hash_speed("wp512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 310: test_hash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 311: test_hash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 312: test_hash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 313: test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 314: test_hash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 315: test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 316: test_hash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 317: test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 318: test_hash_speed("ghash-generic", sec, hash_speed_template_16); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 319: test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 320: test_hash_speed("crct10dif", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 321: test_hash_speed("poly1305", sec, poly1305_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 322: test_hash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 323: test_hash_speed("sha3-256", sec, 
generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 324: test_hash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 325: test_hash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 326: test_hash_speed("sm3", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 327: test_hash_speed("streebog256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 328: test_hash_speed("streebog512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 399: break; @@ -2471,121 +2471,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 401: test_ahash_speed("md4", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 402: test_ahash_speed("md5", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 403: test_ahash_speed("sha1", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 404: test_ahash_speed("sha256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 405: test_ahash_speed("sha384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 406: test_ahash_speed("sha512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 407: test_ahash_speed("wp256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 408: test_ahash_speed("wp384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 409: test_ahash_speed("wp512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 410: test_ahash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 411: test_ahash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 412: test_ahash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 413: test_ahash_speed("sha224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 414: test_ahash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 415: test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 416: test_ahash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 417: test_ahash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 418: test_ahash_speed("sha3-224", sec, 
generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 419: test_ahash_speed("sha3-256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 420: test_ahash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 421: test_ahash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 422: test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 423: test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 424: test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 425: test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 426: test_mb_ahash_speed("streebog256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 427: test_mb_ahash_speed("streebog512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 499: break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6863f911fcee..23c27fc96394 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1744,7 +1744,7 @@ out: kfree(vec.plaintext); kfree(vec.digest); crypto_free_shash(generic_tfm); - kzfree(generic_desc); + kfree_sensitive(generic_desc); return err; } #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ @@ -3665,7 +3665,7 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, if (IS_ERR(drng)) { printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); - kzfree(buf); + kfree_sensitive(buf); return -ENOMEM; } @@ -3712,7 +3712,7 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr, outbuf: crypto_free_rng(drng); - kzfree(buf); + kfree_sensitive(buf); return ret; } diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d29983908c38..b9a2d73d9f8d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = { }; /* - * SHA1 test vectors from from FIPS PUB 180-1 + * SHA1 test vectors from FIPS PUB 180-1 * Long vector from CAVS 5.0 */ static const struct hash_testvec sha1_tv_template[] = { @@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = { /* - * SHA224 test vectors from from FIPS PUB 180-2 + * SHA224 test vectors from FIPS PUB 180-2 */ static const struct hash_testvec sha224_tv_template[] = { { @@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = { }; /* - * SHA256 test vectors from from NIST + * SHA256 test vectors from NIST */ static const struct hash_testvec sha256_tv_template[] = { { @@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = { }; /* - * SHA384 test vectors from from NIST and kerneli + * SHA384 test vectors from NIST and kerneli */ static const struct hash_testvec sha384_tv_template[] = { { @@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = { }; /* - * SHA512 test vectors from from NIST and kerneli + * SHA512 test vectors from NIST and 
kerneli */ static const struct hash_testvec sha512_tv_template[] = { { diff --git a/crypto/vmac.c b/crypto/vmac.c index 2d906830df96..9b565d1040d6 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_instance *inst; struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 598ec88abf0f..af3b7eb5d7c7 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) struct crypto_cipher_spawn *spawn; struct crypto_alg *alg; unsigned long alignmask; + u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask); if (err) return err; @@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) spawn = shash_instance_ctx(inst); err = crypto_grab_cipher(spawn, shash_crypto_instance(inst), - crypto_attr_alg_name(tb[1]), 0, 0); + crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_cipher_alg(spawn); diff --git a/crypto/xts.c b/crypto/xts.c index 6d8cea94b3cf..ad45b009774b 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -20,7 +20,7 @@ #include <crypto/b128ops.h> #include <crypto/gf128mul.h> -struct priv { +struct xts_tfm_ctx { struct crypto_skcipher *child; struct crypto_cipher *tweak; }; @@ -30,17 +30,17 @@ struct xts_instance_ctx { char name[CRYPTO_MAX_ALG_NAME]; }; -struct rctx { +struct xts_request_ctx { le128 t; struct scatterlist *tail; struct scatterlist sg[2]; struct skcipher_request subreq; }; -static int setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int xts_setkey(struct crypto_skcipher *parent, const u8 *key, + unsigned int keylen) { - struct priv *ctx = crypto_skcipher_ctx(parent); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child; struct crypto_cipher *tweak; int err; @@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key, * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the gf128mul_x_ble() calls again. 
*/ -static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) +static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, + bool enc) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const bool cts = (req->cryptlen % XTS_BLOCK_SIZE); const int bs = XTS_BLOCK_SIZE; @@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc) return err; } -static int xor_tweak_pre(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc) { - return xor_tweak(req, false, enc); + return xts_xor_tweak(req, false, enc); } -static int xor_tweak_post(struct skcipher_request *req, bool enc) +static int xts_xor_tweak_post(struct skcipher_request *req, bool enc) { - return xor_tweak(req, true, enc); + return xts_xor_tweak(req, true, enc); } -static void cts_done(struct crypto_async_request *areq, int err) +static void xts_cts_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; le128 b; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); le128_xor(&b, &rctx->t, &b); @@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int cts_final(struct skcipher_request *req, - int (*crypt)(struct skcipher_request *req)) +static int xts_cts_final(struct skcipher_request *req, + int (*crypt)(struct skcipher_request *req)) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1); - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int tail = req->cryptlen % XTS_BLOCK_SIZE; le128 b[2]; @@ -169,7 +171,7 @@ static int cts_final(struct skcipher_request *req, offset - XTS_BLOCK_SIZE); scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0); - memcpy(b + 1, b, tail); + b[1] = b[0]; scatterwalk_map_and_copy(b, req->src, offset, tail, 0); le128_xor(b, &rctx->t, b); @@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req, scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1); skcipher_request_set_tfm(subreq, ctx->child); - skcipher_request_set_callback(subreq, req->base.flags, cts_done, req); + skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done, + req); skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail, XTS_BLOCK_SIZE, NULL); @@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req, return 0; } -static void encrypt_done(struct crypto_async_request *areq, int err) +static void xts_encrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, true); + err = xts_xor_tweak_post(req, true); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_encrypt); + err = xts_cts_final(req, crypto_skcipher_encrypt); if (err == -EINPROGRESS) return; } @@ 
-212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static void decrypt_done(struct crypto_async_request *areq, int err) +static void xts_decrypt_done(struct crypto_async_request *areq, int err) { struct skcipher_request *req = areq->data; if (!err) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - err = xor_tweak_post(req, false); + err = xts_xor_tweak_post(req, false); if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) { - err = cts_final(req, crypto_skcipher_decrypt); + err = xts_cts_final(req, crypto_skcipher_decrypt); if (err == -EINPROGRESS) return; } @@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err) skcipher_request_complete(req, err); } -static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) +static int xts_init_crypt(struct skcipher_request *req, + crypto_completion_t compl) { - struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); - struct rctx *rctx = skcipher_request_ctx(req); + const struct xts_tfm_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; if (req->cryptlen < XTS_BLOCK_SIZE) @@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl) return 0; } -static int encrypt(struct skcipher_request *req) +static int xts_encrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, encrypt_done) ?: - xor_tweak_pre(req, true) ?: + err = xts_init_crypt(req, xts_encrypt_done) ?: + xts_xor_tweak_pre(req, true) ?: crypto_skcipher_encrypt(subreq) ?: - xor_tweak_post(req, true); + xts_xor_tweak_post(req, true); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_encrypt); + return xts_cts_final(req, crypto_skcipher_encrypt); } -static int decrypt(struct skcipher_request *req) +static int xts_decrypt(struct skcipher_request *req) { - struct rctx *rctx = skcipher_request_ctx(req); + struct xts_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; int err; - err = init_crypt(req, decrypt_done) ?: - xor_tweak_pre(req, false) ?: + err = xts_init_crypt(req, xts_decrypt_done) ?: + xts_xor_tweak_pre(req, false) ?: crypto_skcipher_decrypt(subreq) ?: - xor_tweak_post(req, false); + xts_xor_tweak_post(req, false); if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0)) return err; - return cts_final(req, crypto_skcipher_decrypt); + return xts_cts_final(req, crypto_skcipher_decrypt); } -static int init_tfm(struct crypto_skcipher *tfm) +static int xts_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *child; struct crypto_cipher *tweak; @@ -309,41 +314,39 @@ static int init_tfm(struct crypto_skcipher *tfm) ctx->tweak = tweak; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + - sizeof(struct rctx)); + sizeof(struct xts_request_ctx)); return 0; } -static 
void exit_tfm(struct crypto_skcipher *tfm) +static void xts_exit_tfm(struct crypto_skcipher *tfm) { - struct priv *ctx = crypto_skcipher_ctx(tfm); + struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->child); crypto_free_cipher(ctx->tweak); } -static void free_inst(struct skcipher_instance *inst) +static void xts_free_instance(struct skcipher_instance *inst) { - crypto_drop_skcipher(skcipher_instance_ctx(inst)); + struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_skcipher(&ictx->spawn); kfree(inst); } -static int create(struct crypto_template *tmpl, struct rtattr **tb) +static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; struct xts_instance_ctx *ctx; struct skcipher_alg *alg; const char *cipher_name; u32 mask; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) - return -EINVAL; + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) @@ -355,10 +358,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ctx = skcipher_instance_ctx(inst); - mask = crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK | - CRYPTO_ALG_ASYNC); - err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT) { @@ -415,7 +414,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) } else goto err_free_inst; - inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | @@ -425,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; - inst->alg.base.cra_ctxsize = sizeof(struct priv); + inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx); - inst->alg.init = init_tfm; - inst->alg.exit = exit_tfm; + inst->alg.init = xts_init_tfm; + inst->alg.exit = xts_exit_tfm; - inst->alg.setkey = setkey; - inst->alg.encrypt = encrypt; - inst->alg.decrypt = decrypt; + inst->alg.setkey = xts_setkey; + inst->alg.encrypt = xts_encrypt; + inst->alg.decrypt = xts_decrypt; - inst->free = free_inst; + inst->free = xts_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - free_inst(inst); + xts_free_instance(inst); } return err; } -static struct crypto_template crypto_tmpl = { +static struct crypto_template xts_tmpl = { .name = "xts", - .create = create, + .create = xts_create, .module = THIS_MODULE, }; -static int __init crypto_module_init(void) +static int __init xts_module_init(void) { - return crypto_register_template(&crypto_tmpl); + return crypto_register_template(&xts_tmpl); } -static void __exit crypto_module_exit(void) +static void __exit xts_module_exit(void) { - crypto_unregister_template(&crypto_tmpl); + crypto_unregister_template(&xts_tmpl); } -subsys_initcall(crypto_module_init); -module_exit(crypto_module_exit); +subsys_initcall(xts_module_init); +module_exit(xts_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); diff --git a/crypto/zstd.c b/crypto/zstd.c index 5a3ff258d8f7..1a3309f066f7 100644 --- 
a/crypto/zstd.c +++ b/crypto/zstd.c @@ -137,7 +137,7 @@ static void __zstd_exit(void *ctx) static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) { __zstd_exit(ctx); - kzfree(ctx); + kfree_sensitive(ctx); } static void zstd_exit(struct crypto_tfm *tfm)
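
Usage note for the crypto_shash_tfm_digest() helper added in the crypto/shash.c hunk above: it wraps the SHASH_DESC_ON_STACK boilerplate so a caller holding only a bare crypto_shash handle can hash a buffer in one call. The sketch below is a minimal, hypothetical caller, not part of the patch; the "sha256" algorithm name, the example_sha256() wrapper and its error handling are illustrative assumptions, and only the crypto_shash_tfm_digest() signature is taken from the hunk above.

/*
 * Hypothetical example: one-shot digest of a buffer using the new helper.
 * The output buffer must be at least crypto_shash_digestsize(tfm) bytes.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int example_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);	/* assumes sha256 is available */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Replaces the old SHASH_DESC_ON_STACK() + crypto_shash_digest() boilerplate. */
	err = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return err;
}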