From 5b739ef8a4e8cf5201d21abff897e292c232477b Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Thu, 18 Jun 2009 19:50:21 +0800 Subject: random: Add optional continuous repetition test to entropy store based rngs FIPS-140 requires that all random number generators implement continuous self tests in which each extracted block of data is compared against the last block for repetition. The ansi_cprng implements such a test, but it would be nice if the hw RNGs did the same thing. Obviously it's not something that's always needed, but it seems like it would be a nice feature to have on occasion. I've written the below patch which allows individual entropy stores to be flagged as desiring a continuous test to be run on them as data is extracted. By default this option is off, but it is enabled in the event that fips mode is selected during bootup. Signed-off-by: Neil Horman Acked-by: Matt Mackall Signed-off-by: Herbert Xu --- crypto/internal.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/internal.h b/crypto/internal.h index 113579a82dff..95baaea21fbc 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -25,12 +25,7 @@ #include #include #include - -#ifdef CONFIG_CRYPTO_FIPS -extern int fips_enabled; -#else -#define fips_enabled 0 -#endif +#include /* Crypto notification events. */ enum { -- cgit v1.2.3 From a873a5f1c4eda125f506c059a4f8ea48b9f42eff Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 19 Jun 2009 19:46:53 +0800 Subject: crypto: tcrypt - Test algorithms by name This adds the 'alg' module parameter to be able to test an algorithm by name. If the algorithm type is not immediately clear for an algorithm (e.g. pcrypt, cryptd) it is possible to set the algorithm type with the 'type' module parameter. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d59ba5079d14..dfeec0c544cb 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -45,6 +45,8 @@ */ static unsigned int sec; +static char *alg = NULL; +static u32 type; static int mode; static char *tvmem[TVMEMSIZE]; @@ -885,6 +887,11 @@ static int do_test(int m) return ret; } +static int do_alg_test(const char *alg, u32 type) +{ + return crypto_has_alg(alg, type, CRYPTO_ALG_TYPE_MASK); +} + static int __init tcrypt_mod_init(void) { int err = -ENOMEM; @@ -896,7 +903,11 @@ static int __init tcrypt_mod_init(void) goto err_free_tv; } - err = do_test(mode); + if (alg) + err = do_alg_test(alg, type); + else + err = do_test(mode); + if (err) { printk(KERN_ERR "tcrypt: one or more tests failed!\n"); goto err_free_tv; } @@ -928,6 +939,8 @@ static void __exit tcrypt_mod_fini(void) { } module_init(tcrypt_mod_init); module_exit(tcrypt_mod_fini); +module_param(alg, charp, 0); +module_param(type, uint, 0); module_param(mode, int, 0); module_param(sec, uint, 0); MODULE_PARM_DESC(sec, "Length in seconds of speed tests " -- cgit v1.2.3 From 27300176d75e4723e2125e745a98a77bf0133f72 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 19 Jun 2009 20:32:58 +0800 Subject: crypto: ansi_cprng - Do not select FIPS The RNG should work with FIPS disabled.
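The continuous test that the first patch above describes amounts to a block-repetition check on every extraction. A minimal sketch of the idea (function and parameter names are illustrative, not the patch's actual code, and bootstrapping of the very first block is omitted):

    /* FIPS-style continuous test: reject any block identical to its
     * predecessor, otherwise remember it for the next comparison. */
    static int continuous_test(const u8 *block, u8 *last, size_t len)
    {
            if (!memcmp(block, last, len))
                    return -EPERM;  /* repeated block: fail this extraction */
            memcpy(last, block, len);
            return 0;
    }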
Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 4dfdd03e708f..03ef1a986d8b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -782,7 +782,6 @@ config CRYPTO_ANSI_CPRNG tristate "Pseudo Random Number Generation for Cryptographic modules" select CRYPTO_AES select CRYPTO_RNG - select CRYPTO_FIPS help This option enables the generic pseudo random number generator for cryptographic modules. Uses the Algorithm specified in -- cgit v1.2.3 From ea4006576945195ed35824acfb4007ca7cb78b70 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 19 Jun 2009 20:37:00 +0800 Subject: crypto: tcrypt - Fix module return code when testing by name We should return 0/-ENOENT instead of 1/0 when testing by name. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index dfeec0c544cb..a890a6792c7b 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -889,7 +889,7 @@ static int do_test(int m) static int do_alg_test(const char *alg, u32 type) { - return crypto_has_alg(alg, type, CRYPTO_ALG_TYPE_MASK); + return crypto_has_alg(alg, type, CRYPTO_ALG_TYPE_MASK) ? 0 : -ENOENT; } static int __init tcrypt_mod_init(void) -- cgit v1.2.3 From 215ccd6f55a2144bd553e0a3d12e1386f02309fd Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Sun, 21 Jun 2009 21:38:03 +0800 Subject: crypto: fips - Select CPRNG The ANSI CPRNG has no dependence on FIPS support. FIPS support, however, requires the use of the CPRNG. Adjust that dependency relationship in Kconfig. Signed-off-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 03ef1a986d8b..f2002d8e5f67 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -23,6 +23,7 @@ comment "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" + select CRYPTO_ANSI_CPRNG help This options enables the fips boot option which is required if you want to system to operate in a FIPS 200 -- cgit v1.2.3 From 259c5e05c13daaaea039b5bf29a2674701cfd68e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 24 Jun 2009 13:48:13 +0800 Subject: crypto: testmgr - Remove hash size check Until hash test vectors grow longer than 256 bytes, the only purpose of the check is to generate a gcc warning. Signed-off-by: Herbert Xu --- crypto/testmgr.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index e9e9d84293b9..f9bea9d989fa 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template, hash_buff = xbuf[0]; - ret = -EINVAL; - if (WARN_ON(template[i].psize > PAGE_SIZE)) - goto out; - memcpy(hash_buff, template[i].plaintext, template[i].psize); sg_init_one(&sg[0], hash_buff, template[i].psize); -- cgit v1.2.3 From 435578aeaad5859dda8657e3ed2c1a5bc1e524ec Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 25 Jun 2009 14:46:31 +0800 Subject: crypto: skcipher - Fix request for sync algorithms When a sync givcipher algorithm is requested, if an async version of the same algorithm already exists, then we will loop forever without ever constructing the sync version based on a blkcipher. This is because we did not include the requested type/mask when getting a larval for the geniv algorithm that is to be constructed.
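In flag terms the loop is easy to see. A standalone illustration (the constant values are assumed from crypto.h of this era, and the match rule mirrors crypto_alg_lookup(); the real fix also forces the GIVCIPHER type bits, which is simplified away here):

    #include <stdio.h>

    #define CRYPTO_ALG_TYPE_MASK      0x0000000f
    #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
    #define CRYPTO_ALG_ASYNC          0x00000080

    /* an algorithm matches a request iff the masked flag bits agree */
    static int matches(unsigned int alg_flags, unsigned int type, unsigned int mask)
    {
            return ((alg_flags ^ type) & mask) == 0;
    }

    int main(void)
    {
            unsigned int async_geniv = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC;
            unsigned int type = CRYPTO_ALG_TYPE_GIVCIPHER;          /* sync wanted */
            unsigned int mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;

            /* old inner lookup dropped the caller's mask: the existing async
             * geniv matches and the default construction re-enters forever */
            printf("old lookup matches async geniv: %d\n",
                   matches(async_geniv, type, CRYPTO_ALG_TYPE_MASK));   /* 1 */
            /* fixed lookup keeps the caller's mask: no match, so a sync
             * geniv is finally built on top of the blkcipher */
            printf("new lookup matches async geniv: %d\n",
                   matches(async_geniv, type, mask));                   /* 0 */
            return 0;
    }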
Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index e11ce37c7104..43fc8fb9f978 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -201,8 +201,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) int err; larval = crypto_larval_lookup(alg->cra_driver_name, + (type & ~CRYPTO_ALG_TYPE_MASK) | CRYPTO_ALG_TYPE_GIVCIPHER, - CRYPTO_ALG_TYPE_MASK); + mask | CRYPTO_ALG_TYPE_MASK); err = PTR_ERR(larval); if (IS_ERR(larval)) goto out; -- cgit v1.2.3 From 0b67fb65d1b2ba1396de69112b8b9bc95d8d5feb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 25 Jun 2009 18:43:48 +0800 Subject: crypto: skcipher - Change default sync geniv on SMP to eseqiv As it stands we use chainiv for sync algorithms and eseqiv for async algorithms. However, when there is more than one CPU, chainiv forces all processing to be serialised, which is usually not what you want. Also, the added overhead of eseqiv isn't that great. Therefore this patch changes the default sync geniv on SMP machines to eseqiv. For the odd situation where the overhead is unacceptable, chainiv is still available as an option. Note that on UP machines chainiv is still preferred over eseqiv for sync algorithms. Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 43fc8fb9f978..03fb5facf0b4 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -14,6 +14,7 @@ */ #include +#include #include #include #include @@ -25,6 +26,8 @@ #include "internal.h" +static const char *skcipher_default_geniv __read_mostly; + static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -180,7 +183,8 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type); const char *crypto_default_geniv(const struct crypto_alg *alg) { - return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; + return alg->cra_flags & CRYPTO_ALG_ASYNC ? + "eseqiv" : skcipher_default_geniv; } static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) @@ -361,3 +365,17 @@ err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); + +static int __init skcipher_module_init(void) +{ + skcipher_default_geniv = num_possible_cpus() > 1 ? + "eseqiv" : "chainiv"; + return 0; +} + +static void skcipher_module_exit(void) +{ +} + +module_init(skcipher_module_init); +module_exit(skcipher_module_exit); -- cgit v1.2.3 From a68f6610d4f1ebe61818f5926fa8fa9e75d06a95 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 2 Jul 2009 16:32:12 +0800 Subject: crypto: testmgr - Allow implementation-specific tests This patch adds support for testing specific implementations. This should only be used in very specific situations. Right now this means specific implementations of random number generators.
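For orientation, a hypothetical testmgr entry keyed by driver name rather than algorithm name; the .alg string below is made up, and only the lookup-by-driver mechanism comes from the patch whose diff follows:

    static const struct alg_test_desc example_desc = {
            .alg = "ansi_cprng_some_driver",  /* matched against 'driver' */
            .test = alg_test_cprng,
            .fips_allowed = 1,
    };

With the change, alg_test() looks up both the algorithm name and the driver name and runs whichever entries exist.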
Signed-off-by: Herbert Xu --- crypto/testmgr.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f9bea9d989fa..29b228d9b1a2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2344,6 +2344,7 @@ static int alg_find_test(const char *alg) int alg_test(const char *driver, const char *alg, u32 type, u32 mask) { int i; + int j; int rc; if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { @@ -2365,14 +2366,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) } i = alg_find_test(alg); - if (i < 0) + j = alg_find_test(driver); + if (i < 0 && j < 0) goto notest; - if (fips_enabled && !alg_test_descs[i].fips_allowed) + if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) || + (j >= 0 && !alg_test_descs[j].fips_allowed))) goto non_fips_alg; - rc = alg_test_descs[i].test(alg_test_descs + i, driver, - type, mask); + rc = 0; + if (i >= 0) + rc |= alg_test_descs[i].test(alg_test_descs + i, driver, + type, mask); + if (j >= 0) + rc |= alg_test_descs[j].test(alg_test_descs + j, driver, + type, mask); + test_done: if (fips_enabled && rc) panic("%s: %s alg self test failed in fips mode!\n", driver, alg); -- cgit v1.2.3 From ed94070058033a3c99fe62c90d00c39dc443c679 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 12:09:41 +0800 Subject: crypto: ansi_prng - Use just a BH lock The current code uses a mix of spin_lock() & spin_lock_irqsave(). This can lead to deadlock with the correct timing & cprng_get_random() + cprng_reset() sequence. I've converted them to bottom half locks since all three users grab just a BH lock, so this probably runs in softirq :) Signed-off-by: Sebastian Andrzej Siewior Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index d80ed4c1e009..ff00b58c2cdc 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx) /* Our exported functions */ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) { - unsigned long flags; unsigned char *ptr = buf; unsigned int byte_count = (unsigned int)nbytes; int err; @@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) if (nbytes < 0) return -EINVAL; - spin_lock_irqsave(&ctx->prng_lock, flags); + spin_lock_bh(&ctx->prng_lock); err = -EINVAL; if (ctx->flags & PRNG_NEED_RESET) @@ -268,7 +267,7 @@ empty_rbuf: goto remainder; done: - spin_unlock_irqrestore(&ctx->prng_lock, flags); + spin_unlock_bh(&ctx->prng_lock); dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", err, ctx); return err; @@ -287,7 +286,7 @@ static int reset_prng_context(struct prng_context *ctx, int rc = -EINVAL; unsigned char *prng_key; - spin_lock(&ctx->prng_lock); + spin_lock_bh(&ctx->prng_lock); ctx->flags |= PRNG_NEED_RESET; prng_key = (key != NULL) ?
key : (unsigned char *)DEFAULT_PRNG_KEY; @@ -332,7 +331,7 @@ static int reset_prng_context(struct prng_context *ctx, rc = 0; ctx->flags &= ~PRNG_NEED_RESET; out: - spin_unlock(&ctx->prng_lock); + spin_unlock_bh(&ctx->prng_lock); return rc; -- cgit v1.2.3 From fd09d7facb7cf3a884979eb5f843338ce1ce9b43 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 3 Jul 2009 12:10:47 +0800 Subject: crypto: ansi_prng - alloc cipher just in init As reported by Eric Sesterhenn, the re-allocation of the cipher in reset leads to: |BUG: sleeping function called from invalid context at kernel/rwsem.c:21 |in_atomic(): 1, irqs_disabled(): 0, pid: 4926, name: modprobe |INFO: lockdep is turned off. |Pid: 4926, comm: modprobe Tainted: G M 2.6.31-rc1-22297-g5298976 #24 |Call Trace: | [] __might_sleep+0xf9/0x101 | [] down_read+0x16/0x68 | [] crypto_alg_lookup+0x16/0x34 | [] crypto_larval_lookup+0x30/0xf9 | [] crypto_alg_mod_lookup+0x1d/0x62 | [] crypto_alloc_base+0x1e/0x64 | [] reset_prng_context+0xab/0x13f | [] ? __spin_lock_init+0x27/0x51 | [] cprng_init+0x2a/0x42 | [] __crypto_alloc_tfm+0xfa/0x128 | [] crypto_alloc_base+0x33/0x64 | [] alg_test_cprng+0x30/0x1f4 | [] alg_test+0x12f/0x19f | [] ? __alloc_pages_nodemask+0x14d/0x481 | [] do_test+0xf9d/0x163f [tcrypt] | [] do_test+0x3a1/0x163f [tcrypt] | [] tcrypt_mod_init+0x35/0x7c [tcrypt] | [] _stext+0x54/0x12c | [] ? tcrypt_mod_init+0x0/0x7c [tcrypt] | [] ? up_read+0x16/0x2b | [] ? __blocking_notifier_call_chain+0x40/0x4c | [] sys_init_module+0xa9/0x1bf | [] sysenter_do_call+0x12/0x32 because a spin lock is held and crypto_alloc_base() may sleep. There is no reason to re-allocate the cipher; the state is reset in ->setkey(). This patch makes the cipher allocation a one-time thing and moves it to init. Reported-by: Eric Sesterhenn Signed-off-by: Sebastian Andrzej Siewior Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index ff00b58c2cdc..5357ba7d821a 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -283,7 +283,6 @@ static int reset_prng_context(struct prng_context *ctx, unsigned char *V, unsigned char *DT) { int ret; - int rc = -EINVAL; unsigned char *prng_key; spin_lock_bh(&ctx->prng_lock); ctx->flags |= PRNG_NEED_RESET; prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; @@ -307,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx, memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); - if (ctx->tfm) - crypto_free_cipher(ctx->tfm); - - ctx->tfm = crypto_alloc_cipher("aes", 0, 0); - if (IS_ERR(ctx->tfm)) { - dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", - ctx); - ctx->tfm = NULL; - goto out; - } - ctx->rand_data_valid = DEFAULT_BLK_SZ; ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); if (ret) { dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", crypto_cipher_get_flags(ctx->tfm)); - crypto_free_cipher(ctx->tfm); goto out; } - rc = 0; + ret = 0; ctx->flags &= ~PRNG_NEED_RESET; out: spin_unlock_bh(&ctx->prng_lock); - - return rc; - + return ret; } static int cprng_init(struct crypto_tfm *tfm) { struct prng_context *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->prng_lock); + ctx->tfm = crypto_alloc_cipher("aes", 0, 0); + if (IS_ERR(ctx->tfm)) { + dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", + ctx); + return PTR_ERR(ctx->tfm); + } if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL,
NULL) < 0) return -EINVAL; -- cgit v1.2.3 From f2ac72e8268d9559c3114d5a22679f91f80a2238 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 7 Jul 2009 12:30:33 +0800 Subject: crypto: api - Add new template create function This patch introduces the template->create function intended to replace the existing alloc function. The intention is for create to handle the registration directly, whereas currently the caller of alloc has to handle the registration. This allows type-specific code to be run prior to registration. Signed-off-by: Herbert Xu --- crypto/algboss.c | 5 +++++ include/crypto/algapi.h | 1 + 2 files changed, 6 insertions(+) (limited to 'crypto') diff --git a/crypto/algboss.c b/crypto/algboss.c index 9908dd830c26..412241ce4cfa 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data) goto err; do { + if (tmpl->create) { + err = tmpl->create(tmpl, param->tb); + continue; + } + inst = tmpl->alloc(param->tb); if (IS_ERR(inst)) err = PTR_ERR(inst); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 010545436efa..ce010a346420 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -52,6 +52,7 @@ struct crypto_template { struct crypto_instance *(*alloc)(struct rtattr **tb); void (*free)(struct crypto_instance *inst); + int (*create)(struct crypto_template *tmpl, struct rtattr **tb); char name[CRYPTO_MAX_ALG_NAME]; }; -- cgit v1.2.3 From 70ec7bb91ad0d6cce84c8e17f8cbb608dda7b18c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 7 Jul 2009 14:07:37 +0800 Subject: crypto: api - Add crypto_alloc_instance2 This patch adds a new argument to crypto_alloc_instance which sets aside some space before the instance for use by algorithms such as shash that place type-specific data before crypto_alg. For compatibility the function has been renamed so that existing users aren't affected. 
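A sketch of the resulting layout and of the call a frontend with a larger algorithm struct would make (alg being the wrapped struct crypto_alg pointer); this mirrors the shash usage added later in this series:

    /* p --> [ head bytes ][ struct crypto_instance ][ spawn ... ]
     *                       ^ inst = (void *)(p + head)           */
    void *p = crypto_alloc_instance2(name, alg,
                                     sizeof(struct shash_alg) - sizeof(*alg));

so the type-specific fields end up immediately in front of the embedded crypto_alg that crypto_instance carries.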
Signed-off-by: Herbert Xu --- crypto/algapi.c | 37 +++++++++++++++++++++++++++++++------ include/crypto/algapi.h | 2 ++ 2 files changed, 33 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 56c62e2858d5..429f1a003b06 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -627,17 +627,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num) } EXPORT_SYMBOL_GPL(crypto_attr_u32); -struct crypto_instance *crypto_alloc_instance(const char *name, - struct crypto_alg *alg) +void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, + unsigned int head) { struct crypto_instance *inst; - struct crypto_spawn *spawn; + char *p; int err; - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) + p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn), + GFP_KERNEL); + if (!p) return ERR_PTR(-ENOMEM); + inst = (void *)(p + head); + err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, alg->cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; @@ -647,6 +650,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name, name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; + return p; + +err_free_inst: + kfree(p); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_alloc_instance2); + +struct crypto_instance *crypto_alloc_instance(const char *name, + struct crypto_alg *alg) +{ + struct crypto_instance *inst; + struct crypto_spawn *spawn; + int err; + + inst = crypto_alloc_instance2(name, alg, 0); + if (IS_ERR(inst)) + goto out; + spawn = crypto_instance_ctx(inst); err = crypto_init_spawn(spawn, alg, inst, CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); @@ -658,7 +680,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name, err_free_inst: kfree(inst); - return ERR_PTR(err); + inst = ERR_PTR(err); + +out: + return inst; } EXPORT_SYMBOL_GPL(crypto_alloc_instance); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index ce010a346420..99bb29723130 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -132,6 +132,8 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_attr_u32(struct rtattr *rta, u32 *num); +void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, + unsigned int head); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); -- cgit v1.2.3 From 2e4fddd8e420e8f531a34e7a97f9cdb851a6ad13 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 7 Jul 2009 15:17:12 +0800 Subject: crypto: shash - Add shash_instance This patch adds shash_instance and the associated alloc/free functions. This is meant to be an instance with a shash algorithm under it. Note that the instance itself doesn't have to be shash.
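A hypothetical template ->alloc() using the helpers this patch introduces; "example" and base_alg are placeholders, and error unwinding is trimmed:

    static struct crypto_instance *example_alloc(struct rtattr **tb)
    {
            struct shash_instance *inst;

            inst = shash_alloc_instance("example", base_alg);
            if (IS_ERR(inst))
                    return ERR_CAST(inst);

            /* fill in inst->alg: init/update/final, descsize, ... */

            return shash_crypto_instance(inst);
    }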
Signed-off-by: Herbert Xu --- crypto/shash.c | 7 +++++++ include/crypto/internal/hash.h | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 2ccc8b0076ce..8f9d8831e293 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -502,5 +502,12 @@ int crypto_unregister_shash(struct shash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_shash); +void shash_free_instance(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(shash_instance(inst)); +} +EXPORT_SYMBOL_GPL(shash_free_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Synchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 82b70564bcab..44d3bcdce6e2 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -34,6 +34,10 @@ struct crypto_hash_walk { unsigned int flags; }; +struct shash_instance { + struct shash_alg alg; +}; + extern const struct crypto_type crypto_ahash_type; int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); @@ -46,6 +50,8 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, int crypto_register_shash(struct shash_alg *alg); int crypto_unregister_shash(struct shash_alg *alg); +void shash_free_instance(struct crypto_instance *inst); + static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) { return crypto_tfm_ctx(&tfm->base); @@ -80,5 +86,25 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline struct crypto_instance *shash_crypto_instance( + struct shash_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct shash_instance *shash_instance( + struct crypto_instance *inst) +{ + return container_of(__crypto_shash_alg(&inst->alg), + struct shash_instance, alg); +} + +static inline struct shash_instance *shash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, + sizeof(struct shash_alg) - sizeof(*alg)); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ -- cgit v1.2.3 From 97eedce1a64a57648ac5e39f03825528c47ba72e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 15:55:52 +0800 Subject: crypto: api - Add new style spawn support This patch modifies the spawn infrastructure to support new style algorithms like shash. In particular, this means storing the frontend type in the spawn and using crypto_create_tfm to allocate the tfm. 
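The intended lifecycle, sketched with placeholder names (the shash wrappers for both steps appear later in this series):

    /* template creation time: bind the spawn to alg through a frontend,
     * so the frontend's type/mask govern the compatibility check */
    err = crypto_init_spawn2(&ctx->spawn, alg, inst, frontend);

    /* tfm init time: the frontend stored in the spawn picks the
     * right allocation path via crypto_create_tfm() */
    tfm = crypto_spawn_tfm2(&ctx->spawn);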
Signed-off-by: Herbert Xu --- crypto/algapi.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++--- include/crypto/algapi.h | 6 ++++++ 2 files changed, 58 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 429f1a003b06..17a5fff8f777 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -488,6 +488,23 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, } EXPORT_SYMBOL_GPL(crypto_init_spawn); +int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, + const struct crypto_type *frontend) +{ + int err = -EINVAL; + + if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset) + goto out; + + spawn->frontend = frontend; + err = crypto_init_spawn(spawn, alg, inst, frontend->maskset); + +out: + return err; +} +EXPORT_SYMBOL_GPL(crypto_init_spawn2); + void crypto_drop_spawn(struct crypto_spawn *spawn) { down_write(&crypto_alg_sem); @@ -496,12 +513,10 @@ void crypto_drop_spawn(struct crypto_spawn *spawn) } EXPORT_SYMBOL_GPL(crypto_drop_spawn); -struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, - u32 mask) +static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { struct crypto_alg *alg; struct crypto_alg *alg2; - struct crypto_tfm *tfm; down_read(&crypto_alg_sem); alg = spawn->alg; @@ -516,6 +531,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, return ERR_PTR(-EAGAIN); } + return alg; +} + +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, + u32 mask) +{ + struct crypto_alg *alg; + struct crypto_tfm *tfm; + + alg = crypto_spawn_alg(spawn); + if (IS_ERR(alg)) + return ERR_CAST(alg); + tfm = ERR_PTR(-EINVAL); if (unlikely((alg->cra_flags ^ type) & mask)) goto out_put_alg; @@ -532,6 +560,27 @@ out_put_alg: } EXPORT_SYMBOL_GPL(crypto_spawn_tfm); +void *crypto_spawn_tfm2(struct crypto_spawn *spawn) +{ + struct crypto_alg *alg; + struct crypto_tfm *tfm; + + alg = crypto_spawn_alg(spawn); + if (IS_ERR(alg)) + return ERR_CAST(alg); + + tfm = crypto_create_tfm(alg, spawn->frontend); + if (IS_ERR(tfm)) + goto out_put_alg; + + return tfm; + +out_put_alg: + crypto_mod_put(alg); + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); + int crypto_register_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&crypto_chain, nb); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 99bb29723130..c9bff92a9e0e 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -61,6 +61,7 @@ struct crypto_spawn { struct list_head list; struct crypto_alg *alg; struct crypto_instance *inst; + const struct crypto_type *frontend; u32 mask; }; @@ -117,9 +118,14 @@ struct crypto_template *crypto_lookup_template(const char *name); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, struct crypto_instance *inst, u32 mask); +int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst, + const struct crypto_type *frontend); + void crypto_drop_spawn(struct crypto_spawn *spawn); struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, u32 mask); +void *crypto_spawn_tfm2(struct crypto_spawn *spawn); static inline void crypto_set_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst) -- cgit v1.2.3 From 942969992d86330c9700e2cd9afe8a6bea42df78 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 17:21:37 +0800 Subject: crypto: shash - Add spawn support 
This patch adds the functions needed to create and use shash spawns, i.e., to use shash algorithms in a template. Signed-off-by: Herbert Xu --- crypto/shash.c | 9 +++++++++ include/crypto/internal/hash.h | 14 ++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 8f9d8831e293..97f6c8ba103a 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -509,5 +509,14 @@ void shash_free_instance(struct crypto_instance *inst) } EXPORT_SYMBOL_GPL(shash_free_instance); +int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, + struct shash_alg *alg, + struct crypto_instance *inst) +{ + return crypto_init_spawn2(&spawn->base, &alg->base, inst, + &crypto_shash_type); +} +EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Synchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 44d3bcdce6e2..6cc824b79331 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -38,6 +38,10 @@ struct shash_instance { struct shash_alg alg; }; +struct crypto_shash_spawn { + struct crypto_spawn base; +}; + extern const struct crypto_type crypto_ahash_type; int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); @@ -52,6 +56,10 @@ int crypto_unregister_shash(struct shash_alg *alg); void shash_free_instance(struct crypto_instance *inst); +int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, + struct shash_alg *alg, + struct crypto_instance *inst); + static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) { return crypto_tfm_ctx(&tfm->base); @@ -106,5 +114,11 @@ static inline struct shash_instance *shash_alloc_instance( sizeof(struct shash_alg) - sizeof(*alg)); } +static inline struct crypto_shash *crypto_spawn_shash( + struct crypto_shash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ -- cgit v1.2.3 From d06854f0243d91badabaab14503f7f3bb770061d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 17:53:16 +0800 Subject: crypto: api - Add crypto_attr_alg2 helper This patch adds the helper crypto_attr_alg2 which is similar to crypto_attr_alg but takes an extra frontend argument. This is intended to be used by new style algorithm types such as shash. 
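Its first user makes the intent concrete; the shash wrapper added later in this series (quoted here from that patch for orientation) is simply:

    struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
    {
            struct crypto_alg *alg;

            alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
            return IS_ERR(alg) ? ERR_CAST(alg) :
                   container_of(alg, struct shash_alg, base);
    }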
Signed-off-by: Herbert Xu --- crypto/algapi.c | 8 +++++--- crypto/api.c | 31 ++++++++++++++++++++++--------- crypto/internal.h | 3 +++ include/crypto/algapi.h | 11 ++++++++++- 4 files changed, 40 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 17a5fff8f777..2154815201a7 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -644,7 +644,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta) } EXPORT_SYMBOL_GPL(crypto_attr_alg_name); -struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) +struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, + const struct crypto_type *frontend, + u32 type, u32 mask) { const char *name; int err; @@ -654,9 +656,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) if (IS_ERR(name)) return ERR_PTR(err); - return crypto_alg_mod_lookup(name, type, mask); + return crypto_find_alg(name, frontend, type, mask); } -EXPORT_SYMBOL_GPL(crypto_attr_alg); +EXPORT_SYMBOL_GPL(crypto_attr_alg2); int crypto_attr_u32(struct rtattr *rta, u32 *num) { diff --git a/crypto/api.c b/crypto/api.c index d5944f92b416..ba81221b3bef 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -503,6 +503,27 @@ out: } EXPORT_SYMBOL_GPL(crypto_create_tfm); +struct crypto_alg *crypto_find_alg(const char *alg_name, + const struct crypto_type *frontend, + u32 type, u32 mask) +{ + struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) = + crypto_alg_mod_lookup; + + if (frontend) { + type &= frontend->maskclear; + mask &= frontend->maskclear; + type |= frontend->type; + mask |= frontend->maskset; + + if (frontend->lookup) + lookup = frontend->lookup; + } + + return lookup(alg_name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_find_alg); + /* * crypto_alloc_tfm - Locate algorithm and allocate transform * @alg_name: Name of algorithm @@ -526,21 +547,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm); void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) { - struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); void *tfm; int err; - type &= frontend->maskclear; - mask &= frontend->maskclear; - type |= frontend->type; - mask |= frontend->maskset; - - lookup = frontend->lookup ?: crypto_alg_mod_lookup; - for (;;) { struct crypto_alg *alg; - alg = lookup(alg_name, type, mask); + alg = crypto_find_alg(alg_name, frontend, type, mask); if (IS_ERR(alg)) { err = PTR_ERR(alg); goto err; diff --git a/crypto/internal.h b/crypto/internal.h index 95baaea21fbc..7efa4d0533ff 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -106,6 +106,9 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend); +struct crypto_alg *crypto_find_alg(const char *alg_name, + const struct crypto_type *frontend, + u32 type, u32 mask); void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index c9bff92a9e0e..1d15a926041e 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -136,7 +136,16 @@ static inline void crypto_set_spawn(struct crypto_spawn *spawn, struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); const char *crypto_attr_alg_name(struct rtattr *rta); -struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); +struct crypto_alg 
*crypto_attr_alg2(struct rtattr *rta, + const struct crypto_type *frontend, + u32 type, u32 mask); + +static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta, + u32 type, u32 mask) +{ + return crypto_attr_alg2(rta, NULL, type, mask); +} + int crypto_attr_u32(struct rtattr *rta, u32 *num); void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, unsigned int head); -- cgit v1.2.3 From 7d6f56400a695af497a8b7c23ea0ff9c3d9d99f4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 17:56:28 +0800 Subject: crypto: shash - Add shash_attr_alg2 helper This patch adds the helper shash_attr_alg2 which locates a shash algorithm based on the information in the given attribute. Signed-off-by: Herbert Xu --- crypto/shash.c | 10 ++++++++++ include/crypto/internal/hash.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 97f6c8ba103a..21bcff6be5b0 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -518,5 +518,15 @@ int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, } EXPORT_SYMBOL_GPL(crypto_init_shash_spawn); +struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask) +{ + struct crypto_alg *alg; + + alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask); + return IS_ERR(alg) ? ERR_CAST(alg) : + container_of(alg, struct shash_alg, base); +} +EXPORT_SYMBOL_GPL(shash_attr_alg); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Synchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 6cc824b79331..e2ab35a74c62 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -60,6 +60,8 @@ int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, struct shash_alg *alg, struct crypto_instance *inst); +struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); + static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) { return crypto_tfm_ctx(&tfm->base); -- cgit v1.2.3 From 619a6ebd2547f3a8ec2fbc5245daaa1f2056eb32 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 18:46:23 +0800 Subject: crypto: shash - Add shash_register_instance This patch adds shash_register_instance so that shash instances can be registered without bypassing the shash checks applied to normal algorithms. 
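Pulling the series together, a template ->create() would now look roughly like this; the template name is a placeholder, and reference dropping plus error unwinding are trimmed:

    static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
            struct shash_instance *inst;
            struct shash_alg *salg;

            salg = shash_attr_alg(tb[1], 0, 0);
            if (IS_ERR(salg))
                    return PTR_ERR(salg);

            inst = shash_alloc_instance("example", &salg->base);
            if (IS_ERR(inst))
                    return PTR_ERR(inst);

            /* set up inst->alg and its spawn ... */

            return shash_register_instance(tmpl, inst);
    }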
Signed-off-by: Herbert Xu --- crypto/shash.c | 26 +++++++++++++++++++++++++- include/crypto/internal/hash.h | 3 ++- 2 files changed, 27 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 21bcff6be5b0..3d242425d692 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -480,7 +480,7 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_shash); -int crypto_register_shash(struct shash_alg *alg) +static int shash_prepare_alg(struct shash_alg *alg) { struct crypto_alg *base = &alg->base; @@ -491,6 +491,17 @@ int crypto_register_shash(struct shash_alg *alg) base->cra_type = &crypto_shash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + return 0; +} + +int crypto_register_shash(struct shash_alg *alg) +{ + struct crypto_alg *base = &alg->base; + int err; + + err = shash_prepare_alg(alg); + if (err) + return err; return crypto_register_alg(base); } @@ -502,6 +513,19 @@ int crypto_unregister_shash(struct shash_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_shash); +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst) +{ + int err; + + err = shash_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, shash_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(shash_register_instance); + void shash_free_instance(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index e2ab35a74c62..fa5c9fb7ce5a 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -53,7 +53,8 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, int crypto_register_shash(struct shash_alg *alg); int crypto_unregister_shash(struct shash_alg *alg); - +int shash_register_instance(struct crypto_template *tmpl, + struct shash_instance *inst); void shash_free_instance(struct crypto_instance *inst); int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, -- cgit v1.2.3 From f88ad8de282a9c4afd79fb0b4b536025f24a2005 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 23:32:08 +0800 Subject: crypto: shash - Use finup in default digest This patch simplifies the default digest function by using finup. Signed-off-by: Herbert Xu --- crypto/shash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 3d242425d692..783231090005 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -154,8 +154,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return crypto_shash_init(desc) ?: - crypto_shash_update(desc, data, len) ?: - crypto_shash_final(desc, out); + crypto_shash_finup(desc, data, len, out); } int crypto_shash_digest(struct shash_desc *desc, const u8 *data, -- cgit v1.2.3 From deee2289b932d512035f579b8f8e178796564ba1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 8 Jul 2009 23:39:01 +0800 Subject: crypto: shash - Propagate reinit return value This patch fixes crypto_shash_import to propagate the value returned by reinit. 
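A side note on the digest hunk above: the `?:` chain is the GNU conditional-omission extension, where x ?: y evaluates x once and yields it unless it is zero, so

    return crypto_shash_init(desc) ?:
           crypto_shash_finup(desc, data, len, out);

returns the first non-zero error code and only reaches finup when init succeeded.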
Signed-off-by: Herbert Xu --- crypto/shash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 783231090005..23e05a1e9038 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -180,7 +180,7 @@ int crypto_shash_import(struct shash_desc *desc, const u8 *in) memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); if (alg->reinit) - alg->reinit(desc); + return alg->reinit(desc); return 0; } -- cgit v1.2.3 From 7ede5a5ba55a696a6e1d8456526e44635e966a81 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2009 11:34:06 +0800 Subject: crypto: api - Fix crypto_drop_spawn crash on blank spawns This patch allows crypto_drop_spawn to be called on spawns that have not been initialised or have failed initialisation. This fixes potential crashes during initialisation without adding special case code. Signed-off-by: Herbert Xu --- crypto/algapi.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 2154815201a7..6a98076d9d2a 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -507,6 +507,9 @@ EXPORT_SYMBOL_GPL(crypto_init_spawn2); void crypto_drop_spawn(struct crypto_spawn *spawn) { + if (!spawn->alg) + return; + down_write(&crypto_alg_sem); list_del(&spawn->list); up_write(&crypto_alg_sem); -- cgit v1.2.3 From 99d27e1c59e34869605de625b033c52163f5bfa7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2009 20:30:57 +0800 Subject: crypto: shash - Export/import hash state only This patch replaces the full descriptor export with an export of the partial hash state. This allows the use of a consistent export format across all implementations of a given algorithm. This is useful because a number of cases require the use of the partial hash state, e.g., PadLock can use the SHA1 hash state to get around the fact that it can only hash contiguous data chunks. 
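A sketch of the resulting usage, with error handling trimmed and the buffer sized via the statesize attribute this patch introduces:

    char state[128];    /* >= crypto_shash_statesize(tfm) */

    crypto_shash_init(desc);
    crypto_shash_update(desc, part1, len1);
    crypto_shash_export(desc, state);       /* partial hash state out */

    /* ... later, possibly in a different tfm of the same algorithm ... */
    crypto_shash_import(desc2, state);      /* resume from that state */
    crypto_shash_update(desc2, part2, len2);
    crypto_shash_final(desc2, digest);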
Signed-off-by: Herbert Xu --- crypto/shash.c | 25 ++++++++++++++----------- include/crypto/hash.h | 18 ++++++++++++++---- 2 files changed, 28 insertions(+), 15 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 23e05a1e9038..14a3b707a31f 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -172,19 +172,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); -int crypto_shash_import(struct shash_desc *desc, const u8 *in) +static int shash_no_export(struct shash_desc *desc, void *out) { - struct crypto_shash *tfm = desc->tfm; - struct shash_alg *alg = crypto_shash_alg(tfm); - - memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); - - if (alg->reinit) - return alg->reinit(desc); + return -ENOSYS; +} - return 0; +static int shash_no_import(struct shash_desc *desc, const void *in) +{ + return -ENOSYS; } -EXPORT_SYMBOL_GPL(crypto_shash_import); static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) @@ -484,12 +480,19 @@ static int shash_prepare_alg(struct shash_alg *alg) struct crypto_alg *base = &alg->base; if (alg->digestsize > PAGE_SIZE / 8 || - alg->descsize > PAGE_SIZE / 8) + alg->descsize > PAGE_SIZE / 8 || + alg->statesize > PAGE_SIZE / 8) return -EINVAL; base->cra_type = &crypto_shash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + + if (!alg->import) + alg->import = shash_no_import; + if (!alg->export) + alg->export = shash_no_export; + return 0; } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index d56bb71617c3..3c4cce6a425c 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -24,7 +24,6 @@ struct shash_desc { struct shash_alg { int (*init)(struct shash_desc *desc); - int (*reinit)(struct shash_desc *desc); int (*update)(struct shash_desc *desc, const u8 *data, unsigned int len); int (*final)(struct shash_desc *desc, u8 *out); @@ -32,11 +31,14 @@ struct shash_alg { unsigned int len, u8 *out); int (*digest)(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); int (*setkey)(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); unsigned int descsize; unsigned int digestsize; + unsigned int statesize; struct crypto_alg base; }; @@ -251,6 +253,11 @@ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) return crypto_shash_alg(tfm)->digestsize; } +static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) { return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); @@ -281,12 +288,15 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); -static inline void crypto_shash_export(struct shash_desc *desc, u8 *out) +static inline int crypto_shash_export(struct shash_desc *desc, void *out) { - memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); + return crypto_shash_alg(desc->tfm)->export(desc, out); } -int crypto_shash_import(struct shash_desc *desc, const u8 *in); +static inline int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + return crypto_shash_alg(desc->tfm)->import(desc, in); +} static inline int crypto_shash_init(struct shash_desc *desc) { -- cgit v1.2.3 From 
8267adab9433593adb09d94626475c2a5921f111 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2009 20:36:44 +0800 Subject: crypto: shash - Move finup/digest null checks to registration time This patch moves the run-time null finup/digest checks to the shash_prepare_alg function which is run at registration time. Signed-off-by: Herbert Xu --- crypto/shash.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 14a3b707a31f..f7fcc652ff32 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -142,8 +142,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); - if (((unsigned long)data | (unsigned long)out) & alignmask || - !shash->finup) + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_finup_unaligned(desc, data, len, out); return shash->finup(desc, data, len, out); @@ -164,8 +163,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); - if (((unsigned long)data | (unsigned long)out) & alignmask || - !shash->digest) + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_digest_unaligned(desc, data, len, out); return shash->digest(desc, data, len, out); @@ -488,6 +486,10 @@ static int shash_prepare_alg(struct shash_alg *alg) base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + if (!alg->finup) + alg->finup = shash_finup_unaligned; + if (!alg->digest) + alg->digest = shash_digest_unaligned; if (!alg->import) alg->import = shash_no_import; if (!alg->export) -- cgit v1.2.3 From e2a7ce4e185a94462698cc0e5192495ee3d22a2f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2009 21:27:13 +0800 Subject: crypto: sha1_generic - Add export/import support This patch adds export/import support to sha1_generic. The exported type is defined by struct sha1_state, which is basically the entire descriptor state of sha1_generic. Signed-off-by: Herbert Xu --- crypto/sha1_generic.c | 41 +++++++++++++++++++++++++---------------- include/crypto/sha.h | 8 ++++++++ 2 files changed, 33 insertions(+), 16 deletions(-) (limited to 'crypto') diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 9efef20454cb..0416091bf45a 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -25,31 +25,21 @@ #include #include -struct sha1_ctx { - u64 count; - u32 state[5]; - u8 buffer[64]; -}; - static int sha1_init(struct shash_desc *desc) { - struct sha1_ctx *sctx = shash_desc_ctx(desc); + struct sha1_state *sctx = shash_desc_ctx(desc); - static const struct sha1_ctx initstate = { - 0, - { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, - { 0, } + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, }; - *sctx = initstate; - return 0; } static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_ctx *sctx = shash_desc_ctx(desc); + struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial, done; const u8 *src; @@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, /* Add padding and return the message digest. 
*/ static int sha1_final(struct shash_desc *desc, u8 *out) { - struct sha1_ctx *sctx = shash_desc_ctx(desc); + struct sha1_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; u32 i, index, padlen; __be64 bits; @@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out) return 0; } +static int sha1_export(struct shash_desc *desc, void *out) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + return 0; +} + +static int sha1_import(struct shash_desc *desc, const void *in) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + return 0; +} + static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_init, .update = sha1_update, .final = sha1_final, - .descsize = sizeof(struct sha1_ctx), + .export = sha1_export, + .import = sha1_import, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-generic", diff --git a/include/crypto/sha.h b/include/crypto/sha.h index c0ccc2b1a2d8..922a248bd04d 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -5,6 +5,8 @@ #ifndef _CRYPTO_SHA_H #define _CRYPTO_SHA_H +#include + #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 @@ -62,4 +64,10 @@ #define SHA512_H6 0x1f83d9abfb41bd6bULL #define SHA512_H7 0x5be0cd19137e2179ULL +struct sha1_state { + u64 count; + u32 state[SHA1_DIGEST_SIZE / 4]; + u8 buffer[SHA1_BLOCK_SIZE]; +}; + #endif -- cgit v1.2.3 From 3d4d277cf88e1980d905707b9c8ca61e8ad6bf0b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 10 Jul 2009 12:54:20 +0800 Subject: crypto: sha256_generic - Use 64-bit counter like sha1 This patch replaces the two 32-bit counter code in sha256_generic with the simpler 64-bit counter code from sha1. 
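The simplification rests on the fact that the padding only ever needs the total message length in bits, which a single 64-bit byte counter provides directly (it would only overflow past 2^61 bytes); the pattern taken over from sha1 is:

    __be64 bits = cpu_to_be64(sctx->count << 3);  /* length in bits */
    unsigned int partial = sctx->count & 0x3f;    /* bytes mod 64-byte block */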
Signed-off-by: Herbert Xu --- crypto/sha256_generic.c | 65 +++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 35 deletions(-) (limited to 'crypto') diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 6349d8339d37..e58c71bdf333 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -26,7 +26,7 @@ #include struct sha256_ctx { - u32 count[2]; + u64 count; u32 state[8]; u8 buf[128]; }; @@ -231,8 +231,7 @@ static int sha224_init(struct shash_desc *desc) sctx->state[5] = SHA224_H5; sctx->state[6] = SHA224_H6; sctx->state[7] = SHA224_H7; - sctx->count[0] = 0; - sctx->count[1] = 0; + sctx->count = 0; return 0; } @@ -248,7 +247,7 @@ static int sha256_init(struct shash_desc *desc) sctx->state[5] = SHA256_H5; sctx->state[6] = SHA256_H6; sctx->state[7] = SHA256_H7; - sctx->count[0] = sctx->count[1] = 0; + sctx->count = 0; return 0; } @@ -257,33 +256,30 @@ static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_ctx *sctx = shash_desc_ctx(desc); - unsigned int i, index, part_len; - - /* Compute number of bytes mod 128 */ - index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); - - /* Update number of bits */ - if ((sctx->count[0] += (len << 3)) < (len << 3)) { - sctx->count[1]++; - sctx->count[1] += (len >> 29); + unsigned int partial, done; + const u8 *src; + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + + if ((partial + len) > 63) { + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, done + 64); + src = sctx->buf; + } + + do { + sha256_transform(sctx->state, src); + done += 64; + src = data + done; + } while (done + 63 < len); + + partial = 0; } - - part_len = 64 - index; - - /* Transform as many times as possible. */ - if (len >= part_len) { - memcpy(&sctx->buf[index], data, part_len); - sha256_transform(sctx->state, sctx->buf); - - for (i = part_len; i + 63 < len; i += 64) - sha256_transform(sctx->state, &data[i]); - index = 0; - } else { - i = 0; - } - - /* Buffer remaining input */ - memcpy(&sctx->buf[index], &data[i], len-i); + memcpy(sctx->buf + partial, src, len - done); return 0; } @@ -292,22 +288,21 @@ static int sha256_final(struct shash_desc *desc, u8 *out) { struct sha256_ctx *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; - __be32 bits[2]; + __be64 bits; unsigned int index, pad_len; int i; static const u8 padding[64] = { 0x80, }; /* Save number of bits */ - bits[1] = cpu_to_be32(sctx->count[0]); - bits[0] = cpu_to_be32(sctx->count[1]); + bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64. */ - index = (sctx->count[0] >> 3) & 0x3f; + index = sctx->count & 0x3f; pad_len = (index < 56) ? (56 - index) : ((64+56) - index); sha256_update(desc, padding, pad_len); /* Append length (before padding) */ - sha256_update(desc, (const u8 *)bits, sizeof(bits)); + sha256_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) -- cgit v1.2.3 From 9b2fda7b94a769af13c24582739e50664b0e27a8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 10 Jul 2009 13:00:27 +0800 Subject: crypto: sha256_generic - Add export/import support This patch adds export/import support to sha256_generic. The exported type is defined by struct sha256_state, which is basically the entire descriptor state of sha256_generic. 
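Because statesize equals sizeof(struct sha256_state), the exported blob is the complete mid-hash state, and a second implementation sharing that layout (a hardware driver, say, which is hypothetical here) could resume it:

    struct sha256_state saved;

    crypto_shash_export(generic_desc, &saved);  /* plain memcpy out */
    crypto_shash_import(other_desc, &saved);    /* other impl resumes */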
Signed-off-by: Herbert Xu --- crypto/sha256_generic.c | 37 +++++++++++++++++++++++++------------ include/crypto/sha.h | 6 ++++++ 2 files changed, 31 insertions(+), 12 deletions(-) (limited to 'crypto') diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index e58c71bdf333..c48459ebf05b 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -25,12 +25,6 @@ #include #include -struct sha256_ctx { - u64 count; - u32 state[8]; - u8 buf[128]; -}; - static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); @@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input) static int sha224_init(struct shash_desc *desc) { - struct sha256_ctx *sctx = shash_desc_ctx(desc); + struct sha256_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; sctx->state[2] = SHA224_H2; @@ -238,7 +232,7 @@ static int sha224_init(struct shash_desc *desc) static int sha256_init(struct shash_desc *desc) { - struct sha256_ctx *sctx = shash_desc_ctx(desc); + struct sha256_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; sctx->state[2] = SHA256_H2; @@ -255,7 +249,7 @@ static int sha256_init(struct shash_desc *desc) static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha256_ctx *sctx = shash_desc_ctx(desc); + struct sha256_state *sctx = shash_desc_ctx(desc); unsigned int partial, done; const u8 *src; @@ -286,7 +280,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data, static int sha256_final(struct shash_desc *desc, u8 *out) { - struct sha256_ctx *sctx = shash_desc_ctx(desc); + struct sha256_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; __be64 bits; unsigned int index, pad_len; @@ -326,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash) return 0; } +static int sha256_export(struct shash_desc *desc, void *out) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, sizeof(*sctx)); + return 0; +} + +static int sha256_import(struct shash_desc *desc, const void *in) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, sizeof(*sctx)); + return 0; +} + static struct shash_alg sha256 = { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_init, .update = sha256_update, .final = sha256_final, - .descsize = sizeof(struct sha256_ctx), + .export = sha256_export, + .import = sha256_import, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name= "sha256-generic", @@ -346,7 +359,7 @@ static struct shash_alg sha224 = { .init = sha224_init, .update = sha256_update, .final = sha224_final, - .descsize = sizeof(struct sha256_ctx), + .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name= "sha224-generic", diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 922a248bd04d..88ef5eb9514d 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -70,4 +70,10 @@ struct sha1_state { u8 buffer[SHA1_BLOCK_SIZE]; }; +struct sha256_state { + u64 count; + u32 state[SHA256_DIGEST_SIZE / 4]; + u8 buf[SHA256_BLOCK_SIZE]; +}; + #endif -- cgit v1.2.3 From 57cfe44bccb0e38ddb44a34a42f517deef1f4e82 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 11 Jul 2009 22:17:39 +0800 Subject: crypto: shash - Move null setkey check to registration time This patch moves the run-time null setkey check to shash_prepare_alg just like we did for finup/digest. 
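The observable effect, sketched: a keyless hash no longer needs a call-time NULL check, and setkey on, say, sha1 now lands in the registration-time stub:

    err = crypto_shash_setkey(tfm, key, keylen);  /* -ENOSYS for sha1 */

while algorithms providing a real ->setkey are unaffected.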
Signed-off-by: Herbert Xu --- crypto/shash.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index f7fcc652ff32..131e14d2b572 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -22,6 +22,12 @@ static const struct crypto_type crypto_shash_type; +static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + return -ENOSYS; +} + static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -50,9 +56,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); - if (!shash->setkey) - return -ENOSYS; - if ((unsigned long)key & alignmask) return shash_setkey_unaligned(tfm, key, keylen); @@ -494,6 +497,8 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->import = shash_no_import; if (!alg->export) alg->export = shash_no_export; + if (!alg->setkey) + alg->setkey = shash_no_setkey; return 0; } -- cgit v1.2.3 From 113adefc73c291f93f875fe515a46d8f76252fff Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 12:50:12 +0800 Subject: crypto: shash - Make descsize a run-time attribute This patch changes descsize to a run-time attribute so that implementations can change it in their init functions. Signed-off-by: Herbert Xu --- crypto/shash.c | 39 ++++++++++++++++++++++++++++----------- include/crypto/hash.h | 3 ++- include/crypto/internal/hash.h | 2 +- 3 files changed, 31 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 131e14d2b572..d8e0f8f8d672 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -300,14 +300,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen) { - struct shash_desc *desc = crypto_hash_ctx(tfm); + struct shash_desc **descp = crypto_hash_ctx(tfm); + struct shash_desc *desc = *descp; return crypto_shash_setkey(desc->tfm, key, keylen); } static int shash_compat_init(struct hash_desc *hdesc) { - struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); + struct shash_desc *desc = *descp; desc->flags = hdesc->flags; @@ -317,7 +319,8 @@ static int shash_compat_init(struct hash_desc *hdesc) static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, unsigned int len) { - struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); + struct shash_desc *desc = *descp; struct crypto_hash_walk walk; int nbytes; @@ -330,7 +333,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, static int shash_compat_final(struct hash_desc *hdesc, u8 *out) { - return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); + struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); + + return crypto_shash_final(*descp, out); } static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, @@ -340,7 +345,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, int err; if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { - struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm); + struct shash_desc *desc = *descp; void *data; desc->flags = hdesc->flags; @@ -368,9 +374,11 @@ out: static void 
crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) { - struct shash_desc *desc= crypto_tfm_ctx(tfm); + struct shash_desc **descp = crypto_tfm_ctx(tfm); + struct shash_desc *desc = *descp; crypto_free_shash(desc->tfm); + kzfree(desc); } static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) @@ -378,8 +386,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) struct hash_tfm *crt = &tfm->crt_hash; struct crypto_alg *calg = tfm->__crt_alg; struct shash_alg *alg = __crypto_shash_alg(calg); - struct shash_desc *desc = crypto_tfm_ctx(tfm); + struct shash_desc **descp = crypto_tfm_ctx(tfm); struct crypto_shash *shash; + struct shash_desc *desc; if (!crypto_mod_get(calg)) return -EAGAIN; @@ -390,6 +399,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) return PTR_ERR(shash); } + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), + GFP_KERNEL); + if (!desc) { + crypto_free_shash(shash); + return -ENOMEM; + } + + *descp = desc; desc->tfm = shash; tfm->exit = crypto_exit_shash_ops_compat; @@ -419,11 +436,9 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { - struct shash_alg *salg = __crypto_shash_alg(alg); - switch (mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_HASH_MASK: - return sizeof(struct shash_desc) + salg->descsize; + return sizeof(struct shash_desc *); case CRYPTO_ALG_TYPE_AHASH_MASK: return sizeof(struct crypto_shash *); } @@ -434,6 +449,9 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, static int crypto_shash_init_tfm(struct crypto_tfm *tfm, const struct crypto_type *frontend) { + struct crypto_shash *hash = __crypto_shash_cast(tfm); + + hash->descsize = crypto_shash_alg(hash)->descsize; return 0; } @@ -452,7 +470,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "type : shash\n"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", salg->digestsize); - seq_printf(m, "descsize : %u\n", salg->descsize); } static const struct crypto_type crypto_shash_type = { diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f74214a4b01b..fcc02d978231 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -48,6 +48,7 @@ struct crypto_ahash { }; struct crypto_shash { + unsigned int descsize; struct crypto_tfm base; }; @@ -275,7 +276,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) { - return crypto_shash_alg(tfm)->descsize; + return tfm->descsize; } static inline void *shash_desc_ctx(struct shash_desc *desc) diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 069b93e1a8ec..2d1b10f23c91 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -135,7 +135,7 @@ static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm) static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) { - return (struct crypto_shash *)tfm; + return container_of(tfm, struct crypto_shash, base); } #endif /* _CRYPTO_INTERNAL_HASH_H */ -- cgit v1.2.3 From 8bd1209cfff246ce6dfae86837467a01236f9cb6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 9 Jul 2009 12:43:37 +0800 Subject: crypto: hmac - Switch to shash This patch changes hmac to the new shash interface. 
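For reference, a caller drives the converted template through the plain shash API. A minimal sketch (error handling trimmed; the "hmac(sha256)" instantiation is only an example):

	struct crypto_shash *tfm;
	struct shash_desc *desc;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	crypto_shash_setkey(tfm, key, keylen);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	desc->tfm = tfm;
	desc->flags = 0;

	crypto_shash_digest(desc, data, len, out);	/* one-shot MAC */

	kfree(desc);
	crypto_free_shash(tfm);

The descriptor size is only known once the underlying digest has been instantiated, which is why the previous patch made descsize a run-time attribute.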
Signed-off-by: Herbert Xu --- crypto/hmac.c | 271 ++++++++++++++++++++++++---------------------------------- 1 file changed, 114 insertions(+), 157 deletions(-) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index 0ad39c374963..e37342748f2f 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -27,7 +27,7 @@ #include struct hmac_ctx { - struct crypto_hash *child; + struct shash_desc *desc; }; static inline void *align_ptr(void *p, unsigned int align) @@ -35,57 +35,33 @@ static inline void *align_ptr(void *p, unsigned int align) return (void *)ALIGN((unsigned long)p, align); } -static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) +static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) { - return align_ptr(crypto_hash_ctx_aligned(tfm) + - crypto_hash_blocksize(tfm) * 2 + - crypto_hash_digestsize(tfm), sizeof(void *)); + return align_ptr(crypto_shash_ctx_aligned(tfm) + + crypto_shash_blocksize(tfm) * 2 + + crypto_shash_digestsize(tfm), + crypto_tfm_ctx_alignment()); } -static int hmac_setkey(struct crypto_hash *parent, +static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - int bs = crypto_hash_blocksize(parent); - int ds = crypto_hash_digestsize(parent); - char *ipad = crypto_hash_ctx_aligned(parent); + int bs = crypto_shash_blocksize(parent); + int ds = crypto_shash_digestsize(parent); + char *ipad = crypto_shash_ctx_aligned(parent); char *opad = ipad + bs; char *digest = opad + bs; - struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); - struct crypto_hash *tfm = ctx->child; + struct hmac_ctx *ctx = align_ptr(digest + ds, + crypto_tfm_ctx_alignment()); unsigned int i; if (keylen > bs) { - struct hash_desc desc; - struct scatterlist tmp; - int tmplen; int err; - desc.tfm = tfm; - desc.flags = crypto_hash_get_flags(parent); - desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; + ctx->desc->flags = crypto_shash_get_flags(parent) & + CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_init(&desc); - if (err) - return err; - - tmplen = bs * 2 + ds; - sg_init_one(&tmp, ipad, tmplen); - - for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) { - memcpy(ipad, inkey, tmplen); - err = crypto_hash_update(&desc, &tmp, tmplen); - if (err) - return err; - } - - if (keylen) { - memcpy(ipad, inkey, keylen); - err = crypto_hash_update(&desc, &tmp, keylen); - if (err) - return err; - } - - err = crypto_hash_final(&desc, digest); + err = crypto_shash_digest(ctx->desc, inkey, keylen, digest); if (err) return err; @@ -105,181 +81,162 @@ static int hmac_setkey(struct crypto_hash *parent, return 0; } -static int hmac_init(struct hash_desc *pdesc) +static int hmac_init(struct shash_desc *pdesc) { - struct crypto_hash *parent = pdesc->tfm; - int bs = crypto_hash_blocksize(parent); - int ds = crypto_hash_digestsize(parent); - char *ipad = crypto_hash_ctx_aligned(parent); - struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); - struct hash_desc desc; - struct scatterlist tmp; - int err; - - desc.tfm = ctx->child; - desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - sg_init_one(&tmp, ipad, bs); - - err = crypto_hash_init(&desc); - if (unlikely(err)) - return err; - - return crypto_hash_update(&desc, &tmp, bs); + struct crypto_shash *parent = pdesc->tfm; + int bs = crypto_shash_blocksize(parent); + int ds = crypto_shash_digestsize(parent); + char *ipad = crypto_shash_ctx_aligned(parent); + struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, + crypto_tfm_ctx_alignment()); + struct shash_desc *desc = shash_desc_ctx(pdesc); + 
+ desc->tfm = ctx->desc->tfm; + desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_shash_init(desc) ?: + crypto_shash_update(desc, ipad, bs); } -static int hmac_update(struct hash_desc *pdesc, - struct scatterlist *sg, unsigned int nbytes) +static int hmac_update(struct shash_desc *pdesc, + const u8 *data, unsigned int nbytes) { - struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); - struct hash_desc desc; + struct shash_desc *desc = shash_desc_ctx(pdesc); - desc.tfm = ctx->child; - desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - return crypto_hash_update(&desc, sg, nbytes); + return crypto_shash_update(desc, data, nbytes); } -static int hmac_final(struct hash_desc *pdesc, u8 *out) +static int hmac_final(struct shash_desc *pdesc, u8 *out) { - struct crypto_hash *parent = pdesc->tfm; - int bs = crypto_hash_blocksize(parent); - int ds = crypto_hash_digestsize(parent); - char *opad = crypto_hash_ctx_aligned(parent) + bs; + struct crypto_shash *parent = pdesc->tfm; + int bs = crypto_shash_blocksize(parent); + int ds = crypto_shash_digestsize(parent); + char *opad = crypto_shash_ctx_aligned(parent) + bs; char *digest = opad + bs; - struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); - struct hash_desc desc; - struct scatterlist tmp; - int err; - - desc.tfm = ctx->child; - desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - sg_init_one(&tmp, opad, bs + ds); + struct shash_desc *desc = shash_desc_ctx(pdesc); - err = crypto_hash_final(&desc, digest); - if (unlikely(err)) - return err; + desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - return crypto_hash_digest(&desc, &tmp, bs + ds, out); + return crypto_shash_final(desc, digest) ?: + crypto_shash_digest(desc, opad, bs + ds, out); } -static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, - unsigned int nbytes, u8 *out) +static int hmac_finup(struct shash_desc *pdesc, const u8 *data, + unsigned int nbytes, u8 *out) { - struct crypto_hash *parent = pdesc->tfm; - int bs = crypto_hash_blocksize(parent); - int ds = crypto_hash_digestsize(parent); - char *ipad = crypto_hash_ctx_aligned(parent); - char *opad = ipad + bs; - char *digest = opad + bs; - struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); - struct hash_desc desc; - struct scatterlist sg1[2]; - struct scatterlist sg2[1]; - int err; - desc.tfm = ctx->child; - desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - - sg_init_table(sg1, 2); - sg_set_buf(sg1, ipad, bs); - scatterwalk_sg_chain(sg1, 2, sg); + struct crypto_shash *parent = pdesc->tfm; + int bs = crypto_shash_blocksize(parent); + int ds = crypto_shash_digestsize(parent); + char *opad = crypto_shash_ctx_aligned(parent) + bs; + char *digest = opad + bs; + struct shash_desc *desc = shash_desc_ctx(pdesc); - sg_init_table(sg2, 1); - sg_set_buf(sg2, opad, bs + ds); + desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); - if (unlikely(err)) - return err; - - return crypto_hash_digest(&desc, sg2, bs + ds, out); + return crypto_shash_finup(desc, data, nbytes, digest) ?: + crypto_shash_digest(desc, opad, bs + ds, out); } static int hmac_init_tfm(struct crypto_tfm *tfm) { - struct crypto_hash *hash; + struct crypto_shash *parent = __crypto_shash_cast(tfm); + struct crypto_shash *hash; struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 
+ struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); + struct hmac_ctx *ctx = hmac_ctx(parent); - hash = crypto_spawn_hash(spawn); + hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); - ctx->child = hash; + parent->descsize = sizeof(struct shash_desc) + + crypto_shash_descsize(hash); + + ctx->desc = kmalloc(parent->descsize, GFP_KERNEL); + if (!ctx->desc) { + crypto_free_shash(hash); + return -ENOMEM; + } + + ctx->desc->tfm = hash; return 0; } static void hmac_exit_tfm(struct crypto_tfm *tfm) { - struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); - crypto_free_hash(ctx->child); + struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); + crypto_free_shash(ctx->desc->tfm); + kzfree(ctx->desc); } -static void hmac_free(struct crypto_instance *inst) +static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(inst); -} - -static struct crypto_instance *hmac_alloc(struct rtattr **tb) -{ - struct crypto_instance *inst; + struct shash_instance *inst; struct crypto_alg *alg; + struct shash_alg *salg; int err; int ds; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) - return ERR_PTR(err); - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); - - inst = ERR_PTR(-EINVAL); - ds = alg->cra_type == &crypto_hash_type ? - alg->cra_hash.digestsize : - alg->cra_type ? - __crypto_shash_alg(alg)->digestsize : - alg->cra_digest.dia_digestsize; + return err; + + salg = shash_attr_alg(tb[1], 0, 0); + if (IS_ERR(salg)) + return PTR_ERR(salg); + + err = -EINVAL; + ds = salg->digestsize; + alg = &salg->base; if (ds > alg->cra_blocksize) goto out_put_alg; - inst = crypto_alloc_instance("hmac", alg); + inst = shash_alloc_instance("hmac", alg); if (IS_ERR(inst)) goto out_put_alg; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_hash_type; + err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, + shash_crypto_instance(inst)); + if (err) + goto out_free_inst; + + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_hash.digestsize = ds; + inst->alg.digestsize = ds; - inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(inst->alg.cra_blocksize * 2 + ds, - sizeof(void *)); + inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + + ALIGN(alg->cra_blocksize * 2 + ds, + crypto_tfm_ctx_alignment()); - inst->alg.cra_init = hmac_init_tfm; - inst->alg.cra_exit = hmac_exit_tfm; + inst->alg.base.cra_init = hmac_init_tfm; + inst->alg.base.cra_exit = hmac_exit_tfm; - inst->alg.cra_hash.init = hmac_init; - inst->alg.cra_hash.update = hmac_update; - inst->alg.cra_hash.final = hmac_final; - inst->alg.cra_hash.digest = hmac_digest; - inst->alg.cra_hash.setkey = hmac_setkey; + inst->alg.init = hmac_init; + inst->alg.update = hmac_update; + inst->alg.final = hmac_final; + inst->alg.finup = hmac_finup; + inst->alg.setkey = hmac_setkey; + + err = shash_register_instance(tmpl, inst); + if (err) { +out_free_inst: + shash_free_instance(shash_crypto_instance(inst)); + } out_put_alg: crypto_mod_put(alg); - return inst; + return err; } static struct crypto_template hmac_tmpl = { .name = 
"hmac", - .alloc = hmac_alloc, - .free = hmac_free, + .create = hmac_create, + .free = shash_free_instance, .module = THIS_MODULE, }; -- cgit v1.2.3 From 3106caab617c75c9a47706af3a8017318207be2d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 12:48:32 +0800 Subject: crypto: xcbc - Switch to shash This patch converts the xcbc algorithm to the new shash type. Signed-off-by: Herbert Xu --- crypto/xcbc.c | 235 ++++++++++++++++++++++------------------------------------ 1 file changed, 87 insertions(+), 148 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index b63b633e549c..b2cc8551b99f 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -19,15 +19,9 @@ * Kazunori Miyazawa */ -#include -#include +#include #include -#include #include -#include -#include -#include -#include static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, @@ -66,10 +60,10 @@ static void xor_128(u8 *a, const u8 *b, unsigned int bs) ((u32 *)a)[3] ^= ((u32 *)b)[3]; } -static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent, +static int _crypto_xcbc_digest_setkey(struct crypto_shash *parent, struct crypto_xcbc_ctx *ctx) { - int bs = crypto_hash_blocksize(parent); + int bs = crypto_shash_blocksize(parent); int err = 0; u8 key1[bs]; @@ -81,10 +75,10 @@ static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent, return crypto_cipher_setkey(ctx->child, key1, bs); } -static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, +static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); + struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); if (keylen != crypto_cipher_blocksize(ctx->child)) return -EINVAL; @@ -96,10 +90,10 @@ static int crypto_xcbc_digest_setkey(struct crypto_hash *parent, return _crypto_xcbc_digest_setkey(parent, ctx); } -static int crypto_xcbc_digest_init(struct hash_desc *pdesc) +static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm); - int bs = crypto_hash_blocksize(pdesc->tfm); + struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(pdesc->tfm); + int bs = crypto_shash_blocksize(pdesc->tfm); ctx->len = 0; memset(ctx->odds, 0, bs); @@ -108,102 +102,55 @@ static int crypto_xcbc_digest_init(struct hash_desc *pdesc) return 0; } -static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, - struct scatterlist *sg, - unsigned int nbytes) +static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, + unsigned int len) { - struct crypto_hash *parent = pdesc->tfm; - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); + struct crypto_shash *parent = pdesc->tfm; + struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); struct crypto_cipher *tfm = ctx->child; - int bs = crypto_hash_blocksize(parent); - - for (;;) { - struct page *pg = sg_page(sg); - unsigned int offset = sg->offset; - unsigned int slen = sg->length; - - if (unlikely(slen > nbytes)) - slen = nbytes; - - nbytes -= slen; - - while (slen > 0) { - unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); - char *p = crypto_kmap(pg, 0) + offset; - - /* checking the data can fill the block */ - if ((ctx->len + len) <= bs) { - memcpy(ctx->odds + ctx->len, p, len); - ctx->len += len; - slen -= len; - - /* checking the rest of the page */ - if (len + offset >= PAGE_SIZE) { - offset = 0; - pg++; - } else - offset += 
len; - - crypto_kunmap(p, 0); - crypto_yield(pdesc->flags); - continue; - } - - /* filling odds with new data and encrypting it */ - memcpy(ctx->odds + ctx->len, p, bs - ctx->len); - len -= bs - ctx->len; - p += bs - ctx->len; - - ctx->xor(ctx->prev, ctx->odds, bs); - crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); - - /* clearing the length */ - ctx->len = 0; - - /* encrypting the rest of data */ - while (len > bs) { - ctx->xor(ctx->prev, p, bs); - crypto_cipher_encrypt_one(tfm, ctx->prev, - ctx->prev); - p += bs; - len -= bs; - } - - /* keeping the surplus of blocksize */ - if (len) { - memcpy(ctx->odds, p, len); - ctx->len = len; - } - crypto_kunmap(p, 0); - crypto_yield(pdesc->flags); - slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset); - offset = 0; - pg++; - } - - if (!nbytes) - break; - sg = scatterwalk_sg_next(sg); + int bs = crypto_shash_blocksize(parent); + + /* checking the data can fill the block */ + if ((ctx->len + len) <= bs) { + memcpy(ctx->odds + ctx->len, p, len); + ctx->len += len; + return 0; } - return 0; -} + /* filling odds with new data and encrypting it */ + memcpy(ctx->odds + ctx->len, p, bs - ctx->len); + len -= bs - ctx->len; + p += bs - ctx->len; -static int crypto_xcbc_digest_update(struct hash_desc *pdesc, - struct scatterlist *sg, - unsigned int nbytes) -{ - if (WARN_ON_ONCE(in_irq())) - return -EDEADLK; - return crypto_xcbc_digest_update2(pdesc, sg, nbytes); + ctx->xor(ctx->prev, ctx->odds, bs); + crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); + + /* clearing the length */ + ctx->len = 0; + + /* encrypting the rest of data */ + while (len > bs) { + ctx->xor(ctx->prev, p, bs); + crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); + p += bs; + len -= bs; + } + + /* keeping the surplus of blocksize */ + if (len) { + memcpy(ctx->odds, p, len); + ctx->len = len; + } + + return 0; } -static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) +static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) { - struct crypto_hash *parent = pdesc->tfm; - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); + struct crypto_shash *parent = pdesc->tfm; + struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); struct crypto_cipher *tfm = ctx->child; - int bs = crypto_hash_blocksize(parent); + int bs = crypto_shash_blocksize(parent); int err = 0; if (ctx->len == bs) { @@ -248,24 +195,13 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) return 0; } -static int crypto_xcbc_digest(struct hash_desc *pdesc, - struct scatterlist *sg, unsigned int nbytes, u8 *out) -{ - if (WARN_ON_ONCE(in_irq())) - return -EDEADLK; - - crypto_xcbc_digest_init(pdesc); - crypto_xcbc_digest_update2(pdesc, sg, nbytes); - return crypto_xcbc_digest_final(pdesc, out); -} - static int xcbc_init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); - int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm)); + struct crypto_xcbc_ctx *ctx = crypto_tfm_ctx(tfm); + int bs = crypto_tfm_alg_blocksize(tfm); cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) @@ -289,70 +225,73 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) static void xcbc_exit_tfm(struct crypto_tfm *tfm) { - struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); + struct crypto_xcbc_ctx *ctx = crypto_tfm_ctx(tfm); 
crypto_free_cipher(ctx->child); } -static struct crypto_instance *xcbc_alloc(struct rtattr **tb) +static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_instance *inst; + struct shash_instance *inst; struct crypto_alg *alg; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) - return ERR_PTR(err); + return err; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) - return ERR_CAST(alg); + return PTR_ERR(alg); switch(alg->cra_blocksize) { case 16: break; default: - inst = ERR_PTR(-EINVAL); goto out_put_alg; } - inst = crypto_alloc_instance("xcbc", alg); + inst = shash_alloc_instance("xcbc", alg); if (IS_ERR(inst)) goto out_put_alg; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_hash_type; - - inst->alg.cra_hash.digestsize = alg->cra_blocksize; - inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + - ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); - inst->alg.cra_init = xcbc_init_tfm; - inst->alg.cra_exit = xcbc_exit_tfm; - - inst->alg.cra_hash.init = crypto_xcbc_digest_init; - inst->alg.cra_hash.update = crypto_xcbc_digest_update; - inst->alg.cra_hash.final = crypto_xcbc_digest_final; - inst->alg.cra_hash.digest = crypto_xcbc_digest; - inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; + err = crypto_init_spawn(shash_instance_ctx(inst), alg, + shash_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + if (err) + goto out_free_inst; + + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; + + inst->alg.digestsize = alg->cra_blocksize; + inst->alg.base.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + + ALIGN(alg->cra_blocksize * 3, + sizeof(void *)); + inst->alg.base.cra_init = xcbc_init_tfm; + inst->alg.base.cra_exit = xcbc_exit_tfm; + + inst->alg.init = crypto_xcbc_digest_init; + inst->alg.update = crypto_xcbc_digest_update; + inst->alg.final = crypto_xcbc_digest_final; + inst->alg.setkey = crypto_xcbc_digest_setkey; + + err = shash_register_instance(tmpl, inst); + if (err) { +out_free_inst: + shash_free_instance(shash_crypto_instance(inst)); + } out_put_alg: crypto_mod_put(alg); - return inst; -} - -static void xcbc_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(inst); + return err; } static struct crypto_template crypto_xcbc_tmpl = { .name = "xcbc", - .alloc = xcbc_alloc, - .free = xcbc_free, + .create = xcbc_create, + .free = shash_free_instance, .module = THIS_MODULE, }; -- cgit v1.2.3 From 9ef074fa9b525f7fc7a35d0761f3d4ed01fe8252 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 12:50:11 +0800 Subject: crypto: authenc - Remove reference to crypto_hash Now that there are no more legacy hash implementations we can remove the reference to crypto_hash. 
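With only shash algorithms left, the digest size comes from a single field instead of a type-dependent lookup. Side by side, the change below amounts to the following (writing maxauthsize for inst->alg.cra_aead.maxauthsize):

	/* before: pick the right union member by legacy type */
	maxauthsize = auth->cra_type == &crypto_hash_type ?
		      auth->cra_hash.digestsize :
		      auth->cra_type ?
		      __crypto_shash_alg(auth)->digestsize :
		      auth->cra_digest.dia_digestsize;

	/* after: */
	maxauthsize = __crypto_shash_alg(auth)->digestsize;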
Signed-off-by: Herbert Xu --- crypto/authenc.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 5793b64c81a8..2e16ce0089cb 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -436,11 +436,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; - inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? - auth->cra_hash.digestsize : - auth->cra_type ? - __crypto_shash_alg(auth)->digestsize : - auth->cra_digest.dia_digestsize; + inst->alg.cra_aead.maxauthsize = __crypto_shash_alg(auth)->digestsize; inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); -- cgit v1.2.3 From 6941c3a0aabb6ad4167827360f384e9daed7dd7f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 13:58:04 +0800 Subject: crypto: hash - Remove legacy hash/digest implementation This patch removes the implementation of hash and digest now that no algorithms use them anymore. The interface, though, will remain until the users are converted over. Signed-off-by: Herbert Xu --- crypto/Makefile | 3 +-- crypto/api.c | 19 ++----------------- crypto/internal.h | 15 --------------- 3 files changed, 3 insertions(+), 34 deletions(-) (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index 673d9f7c1bda..3c961b4d8046 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -3,7 +3,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-objs := api.o cipher.o digest.o compress.o +crypto-objs := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o @@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o -crypto_hash-objs := hash.o crypto_hash-objs += ahash.o crypto_hash-objs += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o diff --git a/crypto/api.c b/crypto/api.c index ba81221b3bef..c8ac18f7ac1e 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask) switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: return crypto_init_cipher_ops(tfm); - - case CRYPTO_ALG_TYPE_DIGEST: - if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != - CRYPTO_ALG_TYPE_HASH_MASK) - return crypto_init_digest_ops_async(tfm); - else - return crypto_init_digest_ops(tfm); case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_ops(tfm); @@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) case CRYPTO_ALG_TYPE_CIPHER: crypto_exit_cipher_ops(tfm); break; - - case CRYPTO_ALG_TYPE_DIGEST: - crypto_exit_digest_ops(tfm); - break; - + case CRYPTO_ALG_TYPE_COMPRESS: crypto_exit_compress_ops(tfm); break; @@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; - - case CRYPTO_ALG_TYPE_DIGEST: - len += crypto_digest_ctxsize(alg); - break; - + case CRYPTO_ALG_TYPE_COMPRESS: len += crypto_compress_ctxsize(alg); break; diff --git a/crypto/internal.h b/crypto/internal.h index 7efa4d0533ff..030e22921c30 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -60,18 +60,6 @@ static inline void crypto_exit_proc(void) { } #endif -static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg) -{ - unsigned int len = alg->cra_ctxsize; - - if (alg->cra_alignmask) { - len = ALIGN(len, (unsigned long)alg->cra_alignmask +
1); - len += alg->cra_digest.dia_digestsize; - } - - return len; -} - static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) { return alg->cra_ctxsize; @@ -86,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); -int crypto_init_digest_ops(struct crypto_tfm *tfm); -int crypto_init_digest_ops_async(struct crypto_tfm *tfm); int crypto_init_cipher_ops(struct crypto_tfm *tfm); int crypto_init_compress_ops(struct crypto_tfm *tfm); -void crypto_exit_digest_ops(struct crypto_tfm *tfm); void crypto_exit_cipher_ops(struct crypto_tfm *tfm); void crypto_exit_compress_ops(struct crypto_tfm *tfm); -- cgit v1.2.3 From 7eddf95ec5440d60f10963f453e27f82f394044e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 21:25:20 +0800 Subject: crypto: shash - Export async functions This patch exports the async functions so that they can be reused by cryptd when it switches over to using shash. Signed-off-by: Herbert Xu --- crypto/shash.c | 42 ++++++++++++++++++++++-------------------- include/crypto/internal/hash.h | 3 +++ 2 files changed, 25 insertions(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index d8e0f8f8d672..3b1b06b3dcf3 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -202,9 +202,8 @@ static int shash_async_init(struct ahash_request *req) return crypto_shash_init(desc); } -static int shash_async_update(struct ahash_request *req) +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) { - struct shash_desc *desc = ahash_request_ctx(req); struct crypto_hash_walk walk; int nbytes; @@ -214,13 +213,19 @@ static int shash_async_update(struct ahash_request *req) return nbytes; } +EXPORT_SYMBOL_GPL(shash_ahash_update); + +static int shash_async_update(struct ahash_request *req) +{ + return shash_ahash_update(req, ahash_request_ctx(req)); +} static int shash_async_final(struct ahash_request *req) { return crypto_shash_final(ahash_request_ctx(req), req->result); } -static int shash_async_digest(struct ahash_request *req) +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { struct scatterlist *sg = req->src; unsigned int offset = sg->offset; @@ -228,34 +233,31 @@ static int shash_async_digest(struct ahash_request *req) int err; if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { - struct crypto_shash **ctx = - crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - struct shash_desc *desc = ahash_request_ctx(req); void *data; - desc->tfm = *ctx; - desc->flags = req->base.flags; - data = crypto_kmap(sg_page(sg), 0); err = crypto_shash_digest(desc, data + offset, nbytes, req->result); crypto_kunmap(data, 0); crypto_yield(desc->flags); - goto out; - } + } else + err = crypto_shash_init(desc) ?: + shash_ahash_update(req, desc) ?: + crypto_shash_final(desc, req->result); - err = shash_async_init(req); - if (err) - goto out; + return err; +} +EXPORT_SYMBOL_GPL(shash_ahash_digest); - err = shash_async_update(req); - if (err) - goto out; +static int shash_async_digest(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); - err = shash_async_final(req); + desc->tfm = *ctx; + desc->flags = req->base.flags; -out: - return err; + return shash_ahash_digest(req, desc); } static void crypto_exit_shash_ops_async(struct crypto_tfm 
*tfm) diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 2d1b10f23c91..6e283495374e 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -63,6 +63,9 @@ int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn, struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); +int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); + static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) { return crypto_tfm_ctx(&tfm->base); -- cgit v1.2.3 From 46309d8938122dff2fe59bf163307989cd22ea4a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 21:38:59 +0800 Subject: crypto: cryptd - Use shash algorithms This patch changes cryptd to use shash algorithms instead of the legacy hash interface. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 164 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 84 insertions(+), 80 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ae5fa99d5d36..ef5720cf1216 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -39,6 +39,11 @@ struct cryptd_instance_ctx { struct cryptd_queue *queue; }; +struct hashd_instance_ctx { + struct crypto_shash_spawn spawn; + struct cryptd_queue *queue; +}; + struct cryptd_blkcipher_ctx { struct crypto_blkcipher *child; }; @@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx { }; struct cryptd_hash_ctx { - struct crypto_hash *child; + struct crypto_shash *child; }; struct cryptd_hash_request_ctx { crypto_completion_t complete; + struct shash_desc desc; }; static void cryptd_queue_worker(struct work_struct *work); @@ -250,13 +256,12 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) } static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, - struct cryptd_queue *queue) + unsigned int tail) { struct crypto_instance *inst; - struct cryptd_instance_ctx *ctx; int err; - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL); if (!inst) { inst = ERR_PTR(-ENOMEM); goto out; @@ -267,14 +272,6 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_free_inst; - ctx = crypto_instance_ctx(inst); - err = crypto_init_spawn(&ctx->spawn, alg, inst, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); - if (err) - goto out_free_inst; - - ctx->queue = queue; - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_priority = alg->cra_priority + 50; @@ -293,18 +290,28 @@ out_free_inst: static struct crypto_instance *cryptd_alloc_blkcipher( struct rtattr **tb, struct cryptd_queue *queue) { + struct cryptd_instance_ctx *ctx; struct crypto_instance *inst; struct crypto_alg *alg; + int err; alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); - inst = cryptd_alloc_instance(alg, queue); + inst = cryptd_alloc_instance(alg, sizeof(*ctx)); if (IS_ERR(inst)) goto out_put_alg; + ctx = crypto_instance_ctx(inst); + ctx->queue = queue; + + err = crypto_init_spawn(&ctx->spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (err) + goto out_free_inst; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; inst->alg.cra_type = &crypto_ablkcipher_type; @@ -326,23 +333,28 @@ static struct crypto_instance *cryptd_alloc_blkcipher( out_put_alg: 
crypto_mod_put(alg); return inst; + +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out_put_alg; } static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); - struct crypto_spawn *spawn = &ictx->spawn; + struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_shash_spawn *spawn = &ictx->spawn; struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_hash *cipher; + struct crypto_shash *hash; - cipher = crypto_spawn_hash(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); + hash = crypto_spawn_shash(spawn); + if (IS_ERR(hash)) + return PTR_ERR(hash); - ctx->child = cipher; - tfm->crt_ahash.reqsize = - sizeof(struct cryptd_hash_request_ctx); + ctx->child = hash; + tfm->crt_ahash.reqsize = sizeof(struct cryptd_hash_request_ctx) + + crypto_shash_descsize(hash); return 0; } @@ -350,22 +362,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) { struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_hash(ctx->child); + crypto_free_shash(ctx->child); } static int cryptd_hash_setkey(struct crypto_ahash *parent, const u8 *key, unsigned int keylen) { struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); - struct crypto_hash *child = ctx->child; + struct crypto_shash *child = ctx->child; int err; - crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_hash_setkey(child, key, keylen); - crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & - CRYPTO_TFM_RES_MASK); + crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_shash_setkey(child, key, keylen); + crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & + CRYPTO_TFM_RES_MASK); return err; } @@ -385,21 +397,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req, static void cryptd_hash_init(struct crypto_async_request *req_async, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_hash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx; - struct hash_desc desc; - - rctx = ahash_request_ctx(req); + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_shash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct shash_desc *desc = &rctx->desc; if (unlikely(err == -EINPROGRESS)) goto out; - desc.tfm = child; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->tfm = child; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_crt(child)->init(&desc); + err = crypto_shash_init(desc); req->base.complete = rctx->complete; @@ -416,23 +426,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req) static void cryptd_hash_update(struct crypto_async_request *req_async, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_hash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); + struct ahash_request *req = ahash_request_cast(req_async); struct cryptd_hash_request_ctx *rctx; - struct hash_desc desc; rctx = ahash_request_ctx(req); if (unlikely(err == -EINPROGRESS)) goto out; - desc.tfm = child; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 
- - err = crypto_hash_crt(child)->update(&desc, - req->src, - req->nbytes); + err = shash_ahash_update(req, &rctx->desc); req->base.complete = rctx->complete; @@ -449,21 +451,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req) static void cryptd_hash_final(struct crypto_async_request *req_async, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_hash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx; - struct hash_desc desc; - - rctx = ahash_request_ctx(req); + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); if (unlikely(err == -EINPROGRESS)) goto out; - desc.tfm = child; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; - - err = crypto_hash_crt(child)->final(&desc, req->result); + err = crypto_shash_final(&rctx->desc, req->result); req->base.complete = rctx->complete; @@ -480,24 +474,19 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) { - struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); - struct crypto_hash *child = ctx->child; - struct ahash_request *req = ahash_request_cast(req_async); - struct cryptd_hash_request_ctx *rctx; - struct hash_desc desc; - - rctx = ahash_request_ctx(req); + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); + struct crypto_shash *child = ctx->child; + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct shash_desc *desc = &rctx->desc; if (unlikely(err == -EINPROGRESS)) goto out; - desc.tfm = child; - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->tfm = child; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; - err = crypto_hash_crt(child)->digest(&desc, - req->src, - req->nbytes, - req->result); + err = shash_ahash_digest(req, desc); req->base.complete = rctx->complete; @@ -515,22 +504,32 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) static struct crypto_instance *cryptd_alloc_hash( struct rtattr **tb, struct cryptd_queue *queue) { + struct hashd_instance_ctx *ctx; struct crypto_instance *inst; + struct shash_alg *salg; struct crypto_alg *alg; + int err; - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK); - if (IS_ERR(alg)) - return ERR_PTR(PTR_ERR(alg)); + salg = shash_attr_alg(tb[1], 0, 0); + if (IS_ERR(salg)) + return ERR_CAST(salg); - inst = cryptd_alloc_instance(alg, queue); + alg = &salg->base; + inst = cryptd_alloc_instance(alg, sizeof(*ctx)); if (IS_ERR(inst)) goto out_put_alg; + ctx = crypto_instance_ctx(inst); + ctx->queue = queue; + + err = crypto_init_shash_spawn(&ctx->spawn, salg, inst); + if (err) + goto out_free_inst; + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; inst->alg.cra_type = &crypto_ahash_type; - inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; + inst->alg.cra_ahash.digestsize = salg->digestsize; inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.cra_init = cryptd_hash_init_tfm; @@ -545,6 +544,11 @@ static struct crypto_instance *cryptd_alloc_hash( out_put_alg: crypto_mod_put(alg); return inst; + +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out_put_alg; } static struct cryptd_queue queue; -- cgit v1.2.3 From 0d6669e2ba60ce5f5d4def6ab453f03567cc738e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 12 Jul 2009 23:06:33 +0800 Subject: 
crypto: cryptd - Use crypto_ahash_set_reqsize This patch makes cryptd use crypto_ahash_set_reqsize to avoid accessing crypto_ahash directly. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ef5720cf1216..6e6722edd0d9 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -353,8 +353,9 @@ static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) return PTR_ERR(hash); ctx->child = hash; - tfm->crt_ahash.reqsize = sizeof(struct cryptd_hash_request_ctx) + - crypto_shash_descsize(hash); + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct cryptd_hash_request_ctx) + + crypto_shash_descsize(hash)); return 0; } -- cgit v1.2.3 From 2ca33da1dea3ba53d1425226a6bac073c5e8568c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 13 Jul 2009 20:46:25 +0800 Subject: crypto: api - Remove frontend argument from extsize/init_tfm As the extsize and init_tfm functions belong to the frontend, the frontend argument is superfluous. Signed-off-by: Herbert Xu --- crypto/api.c | 4 ++-- crypto/pcompress.c | 6 ++---- crypto/shash.c | 6 ++---- include/crypto/algapi.h | 6 ++---- 4 files changed, 8 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index c8ac18f7ac1e..798526d90538 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -457,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, int err = -ENOMEM; tfmsize = frontend->tfmsize; - total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); + total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); mem = kzalloc(total, GFP_KERNEL); if (mem == NULL) @@ -466,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg, tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; - err = frontend->init_tfm(tfm, frontend); + err = frontend->init_tfm(tfm); if (err) goto out_free_tfm; diff --git a/crypto/pcompress.c b/crypto/pcompress.c index bcadc03726b7..f7c4a7d7412e 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } -static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, - const struct crypto_type *frontend) +static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize; } -static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, - const struct crypto_type *frontend) +static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) { return 0; } diff --git a/crypto/shash.c b/crypto/shash.c index 3b1b06b3dcf3..7063e1421504 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -448,8 +448,7 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, return 0; } -static int crypto_shash_init_tfm(struct crypto_tfm *tfm, - const struct crypto_type *frontend) +static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); @@ -457,8 +456,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm, return 0; } -static unsigned int crypto_shash_extsize(struct crypto_alg *alg, - const struct crypto_type *frontend) +static unsigned int crypto_shash_extsize(struct crypto_alg *alg) { return alg->cra_ctxsize; } diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 1d15a926041e..7635fde7b1a2 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -22,11 +22,9 @@ struct seq_file; struct crypto_type { unsigned int (*ctxsize)(struct crypto_alg *alg, u32
type, u32 mask); - unsigned int (*extsize)(struct crypto_alg *alg, - const struct crypto_type *frontend); + unsigned int (*extsize)(struct crypto_alg *alg); int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); - int (*init_tfm)(struct crypto_tfm *tfm, - const struct crypto_type *frontend); + int (*init_tfm)(struct crypto_tfm *tfm); void (*show)(struct seq_file *m, struct crypto_alg *alg); struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); -- cgit v1.2.3 From 88056ec346ccf41f63dbc7080b24b5fd19d1358d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 12:28:26 +0800 Subject: crypto: ahash - Convert to new style algorithms This patch converts crypto_ahash to the new style. The old ahash algorithm type is retained until the existing ahash implementations are also converted. All ahash users will automatically get the new crypto_ahash type. Signed-off-by: Herbert Xu --- crypto/ahash.c | 82 +++++++++++++++++++++---------- crypto/shash.c | 8 +-- include/crypto/hash.h | 109 +++++++++++++++++++++++++++++------------ include/crypto/internal/hash.h | 11 +++-- include/linux/crypto.h | 29 ++--------- 5 files changed, 148 insertions(+), 91 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index f3476374f764..838519386215 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -24,6 +24,12 @@ #include "internal.h" +static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) +{ + return container_of(crypto_hash_alg_common(hash), struct ahash_alg, + halg); +} + static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int alignmask = walk->alignmask; @@ -169,30 +175,11 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, return -ENOSYS; } -int crypto_ahash_import(struct ahash_request *req, const u8 *in) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ahash_alg *alg = crypto_ahash_alg(tfm); - - memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm)); - - if (alg->reinit) - alg->reinit(req); - - return 0; -} -EXPORT_SYMBOL_GPL(crypto_ahash_import); - -static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, - u32 mask) -{ - return alg->cra_ctxsize; -} - static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { - struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; - struct ahash_tfm *crt = &tfm->crt_ahash; + struct old_ahash_alg *alg = &tfm->__crt_alg->cra_ahash; + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); + struct ahash_alg *nalg = crypto_ahash_alg(crt); if (alg->digestsize > PAGE_SIZE / 8) return -EINVAL; @@ -204,9 +191,42 @@ static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; crt->digestsize = alg->digestsize; + nalg->setkey = alg->setkey; + nalg->halg.digestsize = alg->digestsize; + return 0; } +static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_ahash *hash = __crypto_ahash_cast(tfm); + struct ahash_alg *alg = crypto_ahash_alg(hash); + struct old_ahash_alg *oalg = crypto_old_ahash_alg(hash); + + if (tfm->__crt_alg->cra_type != &crypto_ahash_type) + return crypto_init_shash_ops_async(tfm); + + if (oalg->init) + return crypto_init_ahash_ops(tfm, 0, 0); + + hash->init = alg->init; + hash->update = alg->update; + hash->final = alg->final; + hash->digest = alg->digest; + hash->setkey = alg->setkey ? 
ahash_setkey : ahash_nosetkey; + hash->digestsize = alg->halg.digestsize; + + return 0; +} + +static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) +{ + if (alg->cra_type == &crypto_ahash_type) + return alg->cra_ctxsize; + + return sizeof(struct crypto_shash *); +} + static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) @@ -215,17 +235,29 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); - seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize); + seq_printf(m, "digestsize : %u\n", + __crypto_hash_alg_common(alg)->digestsize); } const struct crypto_type crypto_ahash_type = { - .ctxsize = crypto_ahash_ctxsize, - .init = crypto_init_ahash_ops, + .extsize = crypto_ahash_extsize, + .init_tfm = crypto_ahash_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_ahash_show, #endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, + .type = CRYPTO_ALG_TYPE_AHASH, + .tfmsize = offsetof(struct crypto_ahash, base), }; EXPORT_SYMBOL_GPL(crypto_ahash_type); +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_ahash); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/shash.c b/crypto/shash.c index 7063e1421504..615a5f4d9b7a 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -267,11 +267,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) crypto_free_shash(*ctx); } -static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +int crypto_init_shash_ops_async(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct shash_alg *alg = __crypto_shash_alg(calg); - struct ahash_tfm *crt = &tfm->crt_ahash; + struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; @@ -428,8 +428,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) switch (mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_HASH_MASK: return crypto_init_shash_ops_compat(tfm); - case CRYPTO_ALG_TYPE_AHASH_MASK: - return crypto_init_shash_ops_async(tfm); } return -EINVAL; @@ -441,8 +439,6 @@ static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, switch (mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_HASH_MASK: return sizeof(struct shash_desc *); - case CRYPTO_ALG_TYPE_AHASH_MASK: - return sizeof(struct crypto_shash *); } return 0; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index fcc02d978231..262861d8f0cb 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -15,6 +15,39 @@ #include +struct crypto_ahash; + +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct 
ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + struct hash_alg_common halg; +}; + struct shash_desc { struct crypto_shash *tfm; u32 flags; @@ -37,6 +70,8 @@ struct shash_alg { unsigned int keylen); unsigned int descsize; + + /* These fields must match hash_alg_common. */ unsigned int digestsize; unsigned int statesize; @@ -44,6 +79,18 @@ struct shash_alg { }; struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int digestsize; + unsigned int reqsize; struct crypto_tfm base; }; @@ -54,19 +101,11 @@ struct crypto_shash { static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) { - return (struct crypto_ahash *)tfm; + return container_of(tfm, struct crypto_ahash, base); } -static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - mask &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_AHASH; - mask |= CRYPTO_ALG_TYPE_AHASH_MASK; - - return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask)); -} +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) { @@ -75,7 +114,7 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) static inline void crypto_free_ahash(struct crypto_ahash *tfm) { - crypto_free_tfm(crypto_ahash_tfm(tfm)); + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); } static inline unsigned int crypto_ahash_alignmask( @@ -84,14 +123,26 @@ static inline unsigned int crypto_ahash_alignmask( return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); } -static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm) +static inline struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) { - return &crypto_ahash_tfm(tfm)->crt_ahash; + return container_of(alg, struct hash_alg_common, base); +} + +static inline struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); } static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) { - return crypto_ahash_crt(tfm)->digestsize; + return tfm->digestsize; +} + +static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; } static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) @@ -117,7 +168,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm( static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { - return crypto_ahash_crt(tfm)->reqsize; + return tfm->reqsize; } static inline void *ahash_request_ctx(struct ahash_request *req) @@ -128,41 +179,37 @@ static inline void *ahash_request_ctx(struct ahash_request *req) static inline int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - struct ahash_tfm *crt = crypto_ahash_crt(tfm); - - return crt->setkey(tfm, key, keylen); + return tfm->setkey(tfm, key, keylen); } static inline int 
crypto_ahash_digest(struct ahash_request *req) { - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->digest(req); + return crypto_ahash_reqtfm(req)->digest(req); } -static inline void crypto_ahash_export(struct ahash_request *req, u8 *out) +static inline int crypto_ahash_export(struct ahash_request *req, void *out) { - memcpy(out, ahash_request_ctx(req), - crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); + return crypto_ahash_reqtfm(req)->export(req, out); } -int crypto_ahash_import(struct ahash_request *req, const u8 *in); +static inline int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + return crypto_ahash_reqtfm(req)->import(req, in); +} static inline int crypto_ahash_init(struct ahash_request *req) { - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->init(req); + return crypto_ahash_reqtfm(req)->init(req); } static inline int crypto_ahash_update(struct ahash_request *req) { - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->update(req); + return crypto_ahash_reqtfm(req)->update(req); } static inline int crypto_ahash_final(struct ahash_request *req) { - struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); - return crt->final(req); + return crypto_ahash_reqtfm(req)->final(req); } static inline void ahash_request_set_tfm(struct ahash_request *req, diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 5e45818f3351..08bdffafefad 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -51,6 +51,9 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, struct crypto_hash_walk *walk, struct scatterlist *sg, unsigned int len); +int crypto_register_ahash(struct ahash_alg *alg); +int crypto_unregister_ahash(struct ahash_alg *alg); + int crypto_register_shash(struct shash_alg *alg); int crypto_unregister_shash(struct shash_alg *alg); int shash_register_instance(struct crypto_template *tmpl, @@ -66,12 +69,14 @@ struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); +int crypto_init_shash_ops_async(struct crypto_tfm *tfm); + static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) { - return crypto_tfm_ctx(&tfm->base); + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); } -static inline struct ahash_alg *crypto_ahash_alg( +static inline struct old_ahash_alg *crypto_old_ahash_alg( struct crypto_ahash *tfm) { return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash; @@ -80,7 +85,7 @@ static inline struct ahash_alg *crypto_ahash_alg( static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, unsigned int reqsize) { - crypto_ahash_crt(tfm)->reqsize = reqsize; + tfm->reqsize = reqsize; } static inline int ahash_enqueue_request(struct crypto_queue *queue, diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 274f9c7da90c..9e7e9b62a3dc 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -120,6 +120,7 @@ struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; +struct ahash_request; struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -146,16 +147,6 @@ struct ablkcipher_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -struct ahash_request { - struct crypto_async_request base; - - unsigned int nbytes; - struct 
scatterlist *src; - u8 *result; - - void *__ctx[] CRYPTO_MINALIGN_ATTR; -}; - /** * struct aead_request - AEAD request * @base: Common attributes for async crypto requests @@ -220,7 +211,7 @@ struct ablkcipher_alg { unsigned int ivsize; }; -struct ahash_alg { +struct old_ahash_alg { int (*init)(struct ahash_request *req); int (*reinit)(struct ahash_request *req); int (*update)(struct ahash_request *req); @@ -346,7 +337,7 @@ struct crypto_alg { struct cipher_alg cipher; struct digest_alg digest; struct hash_alg hash; - struct ahash_alg ahash; + struct old_ahash_alg ahash; struct compress_alg compress; struct rng_alg rng; } cra_u; @@ -433,18 +424,6 @@ struct hash_tfm { unsigned int digestsize; }; -struct ahash_tfm { - int (*init)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - - unsigned int digestsize; - unsigned int reqsize; -}; - struct compress_tfm { int (*cot_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, @@ -465,7 +444,6 @@ struct rng_tfm { #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash -#define crt_ahash crt_u.ahash #define crt_compress crt_u.compress #define crt_rng crt_u.rng @@ -479,7 +457,6 @@ struct crypto_tfm { struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; - struct ahash_tfm ahash; struct compress_tfm compress; struct rng_tfm rng; } crt_u; -- cgit v1.2.3 From 01c2dece4316dadc0f9fad1ad0b56d493980e492 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 14:06:06 +0800 Subject: crypto: ahash - Add instance/spawn support This patch adds support for creating ahash instances and using ahash as spawns. 
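For illustration, a template built on this new API might look roughly as follows. This is an editorial sketch, not part of the patch; the "example" name, the context struct and the exact type/mask values are assumptions, and the error-path style mirrors the cryptd conversion later in this series:

#include <crypto/internal/hash.h>

struct example_instance_ctx {
	struct crypto_ahash_spawn spawn;	/* pins the underlying ahash */
};

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct example_instance_ctx *ctx;
	struct hash_alg_common *halg;
	struct ahash_instance *inst;
	int err;

	/* Resolve the wrapped algorithm from the template parameter. */
	halg = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
			      CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	inst = ahash_alloc_instance("example", &halg->base);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	/* ... fill in inst->alg ops and halg sizes here ... */

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn.base);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(&halg->base);
	return err;
}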
Signed-off-by: Herbert Xu --- crypto/ahash.c | 72 ++++++++++++++++++++++++++++++++++++++++++ include/crypto/internal/hash.h | 51 ++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 838519386215..7f599d26086a 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -259,5 +259,77 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_ahash); +static int ahash_prepare_alg(struct ahash_alg *alg) +{ + struct crypto_alg *base = &alg->halg.base; + + if (alg->halg.digestsize > PAGE_SIZE / 8 || + alg->halg.statesize > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_ahash_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; + + return 0; +} + +int crypto_register_ahash(struct ahash_alg *alg) +{ + struct crypto_alg *base = &alg->halg.base; + int err; + + err = ahash_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_ahash); + +int crypto_unregister_ahash(struct ahash_alg *alg) +{ + return crypto_unregister_alg(&alg->halg.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_ahash); + +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst) +{ + int err; + + err = ahash_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, ahash_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(ahash_register_instance); + +void ahash_free_instance(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(ahash_instance(inst)); +} +EXPORT_SYMBOL_GPL(ahash_free_instance); + +int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst) +{ + return crypto_init_spawn2(&spawn->base, &alg->base, inst, + &crypto_ahash_type); +} +EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn); + +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) +{ + struct crypto_alg *alg; + + alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask); + return IS_ERR(alg) ? 
ERR_CAST(alg) : __crypto_hash_alg_common(alg); +} +EXPORT_SYMBOL_GPL(ahash_attr_alg); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 08bdffafefad..34eda233ee67 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -34,10 +34,18 @@ struct crypto_hash_walk { unsigned int flags; }; +struct ahash_instance { + struct ahash_alg alg; +}; + struct shash_instance { struct shash_alg alg; }; +struct crypto_ahash_spawn { + struct crypto_spawn base; +}; + struct crypto_shash_spawn { struct crypto_spawn base; }; @@ -53,6 +61,15 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, int crypto_register_ahash(struct ahash_alg *alg); int crypto_unregister_ahash(struct ahash_alg *alg); +int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); +void ahash_free_instance(struct crypto_instance *inst); + +int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, + struct hash_alg_common *alg, + struct crypto_instance *inst); + +struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int crypto_register_shash(struct shash_alg *alg); int crypto_unregister_shash(struct shash_alg *alg); @@ -88,6 +105,40 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, tfm->reqsize = reqsize; } +static inline struct crypto_instance *ahash_crypto_instance( + struct ahash_instance *inst) +{ + return container_of(&inst->alg.halg.base, struct crypto_instance, alg); +} + +static inline struct ahash_instance *ahash_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct ahash_instance, alg.halg.base); +} + +static inline void *ahash_instance_ctx(struct ahash_instance *inst) +{ + return crypto_instance_ctx(ahash_crypto_instance(inst)); +} + +static inline unsigned int ahash_instance_headroom(void) +{ + return sizeof(struct ahash_alg) - sizeof(struct crypto_alg); +} + +static inline struct ahash_instance *ahash_alloc_instance( + const char *name, struct crypto_alg *alg) +{ + return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); +} + +static inline struct crypto_ahash *crypto_spawn_ahash( + struct crypto_ahash_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline int ahash_enqueue_request(struct crypto_queue *queue, struct ahash_request *request) { -- cgit v1.2.3 From 7be380f7201064f704a128b78ac01a62dbd10162 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 16:06:54 +0800 Subject: crypto: tcrypt - Add mask parameter This patch adds a mask parameter to complement the existing type parameter. This is useful when instantiating algorithms that require a mask other than the default, e.g., ahash algorithms. Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a890a6792c7b..5a375e819d5d 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -47,6 +47,7 @@ static unsigned int sec; static char *alg = NULL; static u32 type; +static u32 mask; static int mode; static char *tvmem[TVMEMSIZE]; @@ -887,9 +888,10 @@ static int do_test(int m) return ret; } -static int do_alg_test(const char *alg, u32 type) +static int do_alg_test(const char *alg, u32 type, u32 mask) { - return crypto_has_alg(alg, type, CRYPTO_ALG_TYPE_MASK) ? 0 : -ENOENT; + return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ? 
+ 0 : -ENOENT; } static int __init tcrypt_mod_init(void) @@ -904,7 +906,7 @@ } if (alg) - err = do_alg_test(alg, type); + err = do_alg_test(alg, type, mask); else err = do_test(mode); @@ -941,6 +943,7 @@ module_exit(tcrypt_mod_fini); module_param(alg, charp, 0); module_param(type, uint, 0); +module_param(mask, uint, 0); module_param(mode, int, 0); module_param(sec, uint, 0); MODULE_PARM_DESC(sec, "Length in seconds of speed tests " -- cgit v1.2.3 From 9cd899a32f611eb6328014f1d9e0ba31977812d9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 18:45:45 +0800 Subject: crypto: cryptd - Switch to template create API This patch changes cryptd to use the template->create function instead of alloc in anticipation of the switch to new style ahash algorithms. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 53 +++++++++++++++++++++++++++---------------------- crypto/internal.h | 3 --- include/crypto/algapi.h | 3 +++ 3 files changed, 32 insertions(+), 27 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 6e6722edd0d9..ad58f513ba8b 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -287,8 +287,9 @@ out_free_inst: goto out; } -static struct crypto_instance *cryptd_alloc_blkcipher( - struct rtattr **tb, struct cryptd_queue *queue) +static int cryptd_create_blkcipher(struct crypto_template *tmpl, + struct rtattr **tb, + struct cryptd_queue *queue) { struct cryptd_instance_ctx *ctx; struct crypto_instance *inst; @@ -298,7 +299,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher( alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) - return ERR_CAST(alg); + return PTR_ERR(alg); inst = cryptd_alloc_instance(alg, sizeof(*ctx)); if (IS_ERR(inst)) @@ -330,14 +331,16 @@ static struct crypto_instance *cryptd_alloc_blkcipher( inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; + err = crypto_register_instance(tmpl, inst); + if (err) { + crypto_drop_spawn(&ctx->spawn); +out_free_inst: + kfree(inst); + } + out_put_alg: crypto_mod_put(alg); - return inst; - -out_free_inst: - kfree(inst); - inst = ERR_PTR(err); - goto out_put_alg; + return err; } static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) @@ -502,8 +505,8 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_digest); } -static struct crypto_instance *cryptd_alloc_hash( - struct rtattr **tb, struct cryptd_queue *queue) +static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, + struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; struct crypto_instance *inst; @@ -513,7 +516,7 @@ static struct crypto_instance *cryptd_alloc_hash( salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) - return ERR_CAST(salg); + return PTR_ERR(salg); alg = &salg->base; inst = cryptd_alloc_instance(alg, sizeof(*ctx)); @@ -542,34 +545,36 @@ static struct crypto_instance *cryptd_alloc_hash( inst->alg.cra_ahash.setkey = cryptd_hash_setkey; inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; + err = crypto_register_instance(tmpl, inst); + if (err) { + crypto_drop_shash(&ctx->spawn); +out_free_inst: + kfree(inst); + } + out_put_alg: crypto_mod_put(alg); - return inst; - -out_free_inst: - kfree(inst); - inst = ERR_PTR(err); - goto out_put_alg; + return err; } static struct cryptd_queue queue; -static struct crypto_instance *cryptd_alloc(struct rtattr **tb) +static
int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) - return ERR_CAST(algt); + return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_BLKCIPHER: - return cryptd_alloc_blkcipher(tb, &queue); + return cryptd_create_blkcipher(tmpl, tb, &queue); case CRYPTO_ALG_TYPE_DIGEST: - return cryptd_alloc_hash(tb, &queue); + return cryptd_create_hash(tmpl, tb, &queue); } - return ERR_PTR(-EINVAL); + return -EINVAL; } static void cryptd_free(struct crypto_instance *inst) @@ -582,7 +587,7 @@ static void cryptd_free(struct crypto_instance *inst) static struct crypto_template cryptd_tmpl = { .name = "cryptd", - .alloc = cryptd_alloc, + .create = cryptd_create, .free = cryptd_free, .module = THIS_MODULE, }; diff --git a/crypto/internal.h b/crypto/internal.h index 030e22921c30..2d226362e594 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -97,9 +97,6 @@ struct crypto_alg *crypto_find_alg(const char *alg_name, void *crypto_alloc_tfm(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask); -int crypto_register_instance(struct crypto_template *tmpl, - struct crypto_instance *inst); - int crypto_register_notifier(struct notifier_block *nb); int crypto_unregister_notifier(struct notifier_block *nb); int crypto_probing_notify(unsigned long val, void *v); diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 7635fde7b1a2..9de6c38f4069 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -114,6 +114,9 @@ int crypto_register_template(struct crypto_template *tmpl); void crypto_unregister_template(struct crypto_template *tmpl); struct crypto_template *crypto_lookup_template(const char *name); +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); + int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, struct crypto_instance *inst, u32 mask); int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, -- cgit v1.2.3 From 0b535adfb102bac1edb046444172b6b77d99bc92 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 19:11:32 +0800 Subject: crypto: cryptd - Switch to new style ahash This patch changes cryptd to use the new style ahash type. In particular, the instance is enlarged to encapsulate the new ahash_alg structure. 
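Why enlarging works (an illustrative note, assuming no tail padding between halg.base and the end of struct ahash_alg): struct crypto_instance embeds a plain crypto_alg, while a new-style instance must carry a full ahash_alg whose crypto_alg sits at its tail, inside halg. The head parameter added to cryptd_alloc_instance() below reserves exactly that difference in front of the crypto_instance:

/*
 * Sketch of the block returned by cryptd_alloc_instance(alg,
 * ahash_instance_headroom(), sizeof(*ctx)), where p is the kzalloc
 * result and inst = (void *)(p + head):
 *
 *   p          : ahash_alg ops (init/update/final/...), digestsize,
 *                statesize                          <- the headroom
 *   p + head   : struct crypto_instance, whose embedded crypto_alg
 *                doubles as ahash_alg's halg.base
 *   after inst : the instance context (hashd_instance_ctx)
 *
 * ahash_instance_headroom() == sizeof(struct ahash_alg) -
 * sizeof(struct crypto_alg), so ahash_instance() can walk back from
 * the embedded crypto_alg to the enclosing ahash_instance.
 */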
Signed-off-by: Herbert Xu --- crypto/cryptd.c | 64 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 37 insertions(+), 27 deletions(-) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ad58f513ba8b..5dabb7dbad84 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -255,17 +255,18 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) crypto_free_blkcipher(ctx->child); } -static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, - unsigned int tail) +static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, + unsigned int tail) { + char *p; struct crypto_instance *inst; int err; - inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL); - if (!inst) { - inst = ERR_PTR(-ENOMEM); - goto out; - } + p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + inst = (void *)(p + head); err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, @@ -279,11 +280,11 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, inst->alg.cra_alignmask = alg->cra_alignmask; out: - return inst; + return p; out_free_inst: - kfree(inst); - inst = ERR_PTR(err); + kfree(p); + p = ERR_PTR(err); goto out; } @@ -301,7 +302,7 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl, if (IS_ERR(alg)) return PTR_ERR(alg); - inst = cryptd_alloc_instance(alg, sizeof(*ctx)); + inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); if (IS_ERR(inst)) goto out_put_alg; @@ -509,7 +510,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) { struct hashd_instance_ctx *ctx; - struct crypto_instance *inst; + struct ahash_instance *inst; struct shash_alg *salg; struct crypto_alg *alg; int err; @@ -519,33 +520,34 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, return PTR_ERR(salg); alg = &salg->base; - inst = cryptd_alloc_instance(alg, sizeof(*ctx)); + inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), + sizeof(*ctx)); if (IS_ERR(inst)) goto out_put_alg; - ctx = crypto_instance_ctx(inst); + ctx = ahash_instance_ctx(inst); ctx->queue = queue; - err = crypto_init_shash_spawn(&ctx->spawn, salg, inst); + err = crypto_init_shash_spawn(&ctx->spawn, salg, + ahash_crypto_instance(inst)); if (err) goto out_free_inst; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; - inst->alg.cra_type = &crypto_ahash_type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC; - inst->alg.cra_ahash.digestsize = salg->digestsize; - inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); + inst->alg.halg.digestsize = salg->digestsize; + inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); - inst->alg.cra_init = cryptd_hash_init_tfm; - inst->alg.cra_exit = cryptd_hash_exit_tfm; + inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; + inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; - inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; - inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; - inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; - inst->alg.cra_ahash.setkey = cryptd_hash_setkey; - inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; + inst->alg.init = cryptd_hash_init_enqueue; + inst->alg.update = cryptd_hash_update_enqueue; + inst->alg.final = cryptd_hash_final_enqueue; + inst->alg.setkey = cryptd_hash_setkey; + inst->alg.digest = cryptd_hash_digest_enqueue; - err = crypto_register_instance(tmpl, inst); + err = 
ahash_register_instance(tmpl, inst); if (err) { crypto_drop_shash(&ctx->spawn); out_free_inst: @@ -580,6 +582,14 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) static void cryptd_free(struct crypto_instance *inst) { struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); + struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); + + switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_AHASH: + crypto_drop_shash(&hctx->spawn); + kfree(ahash_instance(inst)); + return; + } crypto_drop_spawn(&ctx->spawn); kfree(inst); -- cgit v1.2.3 From 500b3e3c3dc8e4845b77ae81e5b7b085ab183ce6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 20:29:57 +0800 Subject: crypto: ahash - Remove old_ahash_alg Now that all ahash implementations have been converted to the new ahash type, we can remove old_ahash_alg and its associated support. Signed-off-by: Herbert Xu --- crypto/ahash.c | 27 --------------------------- crypto/shash.c | 2 -- include/crypto/hash.h | 3 +-- include/crypto/internal/hash.h | 6 ------ include/linux/crypto.h | 16 ---------------- 5 files changed, 1 insertion(+), 53 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 7f599d26086a..cc824ef25830 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -175,46 +175,19 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, return -ENOSYS; } -static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) -{ - struct old_ahash_alg *alg = &tfm->__crt_alg->cra_ahash; - struct crypto_ahash *crt = __crypto_ahash_cast(tfm); - struct ahash_alg *nalg = crypto_ahash_alg(crt); - - if (alg->digestsize > PAGE_SIZE / 8) - return -EINVAL; - - crt->init = alg->init; - crt->update = alg->update; - crt->final = alg->final; - crt->digest = alg->digest; - crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; - crt->digestsize = alg->digestsize; - - nalg->setkey = alg->setkey; - nalg->halg.digestsize = alg->digestsize; - - return 0; -} - static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); - struct old_ahash_alg *oalg = crypto_old_ahash_alg(hash); if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_init_shash_ops_async(tfm); - if (oalg->init) - return crypto_init_ahash_ops(tfm, 0, 0); - hash->init = alg->init; hash->update = alg->update; hash->final = alg->final; hash->digest = alg->digest; hash->setkey = alg->setkey ? 
ahash_setkey : ahash_nosetkey; - hash->digestsize = alg->halg.digestsize; return 0; } diff --git a/crypto/shash.c b/crypto/shash.c index 615a5f4d9b7a..fd92c03b38fc 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -270,7 +270,6 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) int crypto_init_shash_ops_async(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; - struct shash_alg *alg = __crypto_shash_alg(calg); struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; @@ -293,7 +292,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->digest = shash_async_digest; crt->setkey = shash_async_setkey; - crt->digestsize = alg->digestsize; crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 262861d8f0cb..45c2bddfdf32 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -89,7 +89,6 @@ struct crypto_ahash { int (*setkey)(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); - unsigned int digestsize; unsigned int reqsize; struct crypto_tfm base; }; @@ -137,7 +136,7 @@ static inline struct hash_alg_common *crypto_hash_alg_common( static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) { - return tfm->digestsize; + return crypto_hash_alg_common(tfm)->digestsize; } static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index e3a82514d61e..179dd8fdfa14 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -109,12 +109,6 @@ static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) halg); } -static inline struct old_ahash_alg *crypto_old_ahash_alg( - struct crypto_ahash *tfm) -{ - return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash; -} - static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, unsigned int reqsize) { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9e7e9b62a3dc..fd929889e8dc 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -115,12 +115,10 @@ struct crypto_async_request; struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; -struct crypto_ahash; struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; -struct ahash_request; struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -211,18 +209,6 @@ struct ablkcipher_alg { unsigned int ivsize; }; -struct old_ahash_alg { - int (*init)(struct ahash_request *req); - int (*reinit)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - - unsigned int digestsize; -}; - struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); @@ -309,7 +295,6 @@ struct rng_alg { #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest #define cra_hash cra_u.hash -#define cra_ahash cra_u.ahash #define cra_compress cra_u.compress #define cra_rng cra_u.rng @@ -337,7 +322,6 @@ struct crypto_alg { struct cipher_alg cipher; struct digest_alg digest; struct hash_alg hash; - struct old_ahash_alg ahash; struct compress_alg compress; struct rng_alg rng; } cra_u; -- cgit v1.2.3 From 
8c32c516eb1c1f9c14d25478442137c698788975 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 21:35:36 +0800 Subject: crypto: hash - Zap unaligned buffers Some unaligned buffers on the stack weren't zapped properly which may cause secret data to be leaked. This patch fixes them by doing a zero memset. It is also possible for us to place random kernel stack contents in the digest buffer if a digest operation fails. This is fixed by only copying if the operation succeeded. Signed-off-by: Herbert Xu --- crypto/ahash.c | 3 +-- crypto/shash.c | 14 +++++++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index cc824ef25830..1576f95f9afe 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -152,8 +152,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = ahash->setkey(tfm, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kzfree(buffer); return ret; } diff --git a/crypto/shash.c b/crypto/shash.c index fd92c03b38fc..e54328364a85 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -45,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); err = shash->setkey(tfm, alignbuffer, keylen); - memset(alignbuffer, 0, keylen); - kfree(buffer); + kzfree(buffer); return err; } @@ -79,13 +78,16 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, ((unsigned long)data & alignmask); u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] __attribute__ ((aligned)); + int err; if (unaligned_len > len) unaligned_len = len; memcpy(buf, data, unaligned_len); + err = shash->update(desc, buf, unaligned_len); + memset(buf, 0, unaligned_len); - return shash->update(desc, buf, unaligned_len) ?: + return err ?: shash->update(desc, data + unaligned_len, len - unaligned_len); } @@ -114,7 +116,13 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) int err; err = shash->final(desc, buf); + if (err) + goto out; + memcpy(out, buf, ds); + +out: + memset(buf, 0, ds); return err; } -- cgit v1.2.3 From 0e2d3a126338ebb213c8e32d8d1d8936d8e62d43 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 21:43:56 +0800 Subject: crypto: shash - Fix alignment in unaligned operations When we encounter an unaligned pointer we are supposed to copy it to a temporary aligned location. However the temporary buffer isn't aligned properly. This patch fixes that. 
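The fix applies the usual over-allocate-then-round-up idiom. A minimal standalone sketch of the same pattern (the 64-byte size and 16-byte alignment are made-up values for illustration):

#include <linux/kernel.h>	/* PTR_ALIGN */

u8 raw[64 + 15];			/* 15 spare bytes for worst-case padding */
u8 *buf = PTR_ALIGN(&raw[0], 16);	/* first 16-byte boundary inside raw */

The point of the patch is that __attribute__ ((aligned)) only gives the compiler's default maximum alignment, which may be smaller than alignmask + 1, so the working pointer must be rounded up explicitly with PTR_ALIGN.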
Signed-off-by: Herbert Xu --- crypto/shash.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index e54328364a85..171c8f052f89 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -76,8 +76,9 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, unsigned long alignmask = crypto_shash_alignmask(tfm); unsigned int unaligned_len = alignmask + 1 - ((unsigned long)data & alignmask); - u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] + u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)] __attribute__ ((aligned)); + u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; if (unaligned_len > len) @@ -111,8 +112,9 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out) unsigned long alignmask = crypto_shash_alignmask(tfm); struct shash_alg *shash = crypto_shash_alg(tfm); unsigned int ds = crypto_shash_digestsize(tfm); - u8 buf[shash_align_buffer_size(ds, alignmask)] + u8 ubuf[shash_align_buffer_size(ds, alignmask)] __attribute__ ((aligned)); + u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1); int err; err = shash->final(desc, buf); -- cgit v1.2.3 From 093900c2b964da73daf234374225b5ce5d49f941 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 14 Jul 2009 21:48:35 +0800 Subject: crypto: ahash - Use GFP_KERNEL in unaligned setkey We currently use GFP_ATOMIC in the unaligned setkey function to allocate the temporary aligned buffer. Since setkey must be called in a sleepable context, we can use GFP_KERNEL instead. Signed-off-by: Herbert Xu --- crypto/ahash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 1576f95f9afe..a196055b73d3 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -145,7 +145,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned long absize; absize = keylen + alignmask; - buffer = kmalloc(absize, GFP_ATOMIC); + buffer = kmalloc(absize, GFP_KERNEL); if (!buffer) return -ENOMEM; -- cgit v1.2.3 From 66f6ce5e52f2f209d5bf1f06167cec888f4f4c13 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jul 2009 12:40:40 +0800 Subject: crypto: ahash - Add unaligned handling and default operations This patch exports the finup operation where available and adds a default finup operation for ahash. The final, finup and digest operations will now also handle unaligned result pointers by copying them. Finally, the export/import operations will now be exported too.
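In caller terms, finup folds the final update and the digest retrieval into one call; a rough synchronous sketch (asynchronous completion handling omitted):

/* Before: two round trips into the driver. */
err = crypto_ahash_update(req);
if (!err)
	err = crypto_ahash_final(req);

/* After: one call; drivers without finup get the default
 * ahash_def_finup(), which chains update and final internally. */
err = crypto_ahash_finup(req);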
Signed-off-by: Herbert Xu --- crypto/ahash.c | 204 ++++++++++++++++++++++++++++++++++++++++- crypto/shash.c | 52 ++++++++++- include/crypto/hash.h | 23 ++--- include/crypto/internal/hash.h | 6 ++ 4 files changed, 263 insertions(+), 22 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index a196055b73d3..ac0798d2824e 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -24,6 +24,13 @@ #include "internal.h" +struct ahash_request_priv { + crypto_completion_t complete; + void *data; + u8 *result; + void *ubuf[] CRYPTO_MINALIGN_ATTR; +}; + static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) { return container_of(crypto_hash_alg_common(hash), struct ahash_alg, @@ -156,7 +163,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, return ret; } -static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct ahash_alg *ahash = crypto_ahash_alg(tfm); @@ -167,6 +174,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, return ahash->setkey(tfm, key, keylen); } +EXPORT_SYMBOL_GPL(crypto_ahash_setkey); static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) @@ -174,19 +182,209 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, return -ENOSYS; } +static inline unsigned int ahash_align_buffer_size(unsigned len, + unsigned long mask) +{ + return len + (mask & ~(crypto_tfm_ctx_alignment() - 1)); +} + +static void ahash_op_unaligned_finish(struct ahash_request *req, int err) +{ + struct ahash_request_priv *priv = req->priv; + + if (err == -EINPROGRESS) + return; + + if (!err) + memcpy(priv->result, req->result, + crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + + kzfree(priv); +} + +static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) +{ + struct ahash_request *areq = req->data; + struct ahash_request_priv *priv = areq->priv; + crypto_completion_t complete = priv->complete; + void *data = priv->data; + + ahash_op_unaligned_finish(areq, err); + + complete(data, err); +} + +static int ahash_op_unaligned(struct ahash_request *req, + int (*op)(struct ahash_request *)) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + unsigned long alignmask = crypto_ahash_alignmask(tfm); + unsigned int ds = crypto_ahash_digestsize(tfm); + struct ahash_request_priv *priv; + int err; + + priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!priv) + return -ENOMEM; + + priv->result = req->result; + priv->complete = req->base.complete; + priv->data = req->base.data; + + req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); + req->base.complete = ahash_op_unaligned_done; + req->base.data = req; + req->priv = priv; + + err = op(req); + ahash_op_unaligned_finish(req, err); + + return err; +} + +static int crypto_ahash_op(struct ahash_request *req, + int (*op)(struct ahash_request *)) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + unsigned long alignmask = crypto_ahash_alignmask(tfm); + + if ((unsigned long)req->result & alignmask) + return ahash_op_unaligned(req, op); + + return op(req); +} + +int crypto_ahash_final(struct ahash_request *req) +{ + return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); +} +EXPORT_SYMBOL_GPL(crypto_ahash_final); + +int crypto_ahash_finup(struct ahash_request *req) +{ + return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); +} +EXPORT_SYMBOL_GPL(crypto_ahash_finup); + +int crypto_ahash_digest(struct ahash_request *req) +{ + return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); +} +EXPORT_SYMBOL_GPL(crypto_ahash_digest); + +static void ahash_def_finup_finish2(struct ahash_request *req, int err) +{ + struct ahash_request_priv *priv = req->priv; + + if (err == -EINPROGRESS) + return; + + if (!err) + memcpy(priv->result, req->result, + crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + + kzfree(priv); +} + +static void ahash_def_finup_done2(struct crypto_async_request *req, int err) +{ + struct ahash_request *areq = req->data; + struct ahash_request_priv *priv = areq->priv; + crypto_completion_t complete = priv->complete; + void *data = priv->data; + + ahash_def_finup_finish2(areq, err); + + complete(data, err); +} + +static int ahash_def_finup_finish1(struct ahash_request *req, int err) +{ + if (err) + goto out; + + req->base.complete = ahash_def_finup_done2; + req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_ahash_reqtfm(req)->final(req); + +out: + ahash_def_finup_finish2(req, err); + return err; +} + +static void ahash_def_finup_done1(struct crypto_async_request *req, int err) +{ + struct ahash_request *areq = req->data; + struct ahash_request_priv *priv = areq->priv; + crypto_completion_t complete = priv->complete; + void *data = priv->data; + + err = ahash_def_finup_finish1(areq, err); + + complete(data, err); +} + +static int ahash_def_finup(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + unsigned long alignmask = crypto_ahash_alignmask(tfm); + unsigned int ds = crypto_ahash_digestsize(tfm); + struct ahash_request_priv *priv; + + priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC); + if (!priv) + return -ENOMEM; + + priv->result = req->result; + priv->complete = req->base.complete; + priv->data = req->base.data; + + req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); + req->base.complete = ahash_def_finup_done1; + req->base.data = req; + req->priv = priv; + + return ahash_def_finup_finish1(req, tfm->update(req)); +} + +static int ahash_no_export(struct ahash_request *req, void *out) +{ + return -ENOSYS; +} + +static int ahash_no_import(struct ahash_request *req, const void *in) +{ + return -ENOSYS; +} + static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) { struct crypto_ahash *hash = __crypto_ahash_cast(tfm); struct ahash_alg *alg = crypto_ahash_alg(hash); + hash->setkey = ahash_nosetkey; + hash->export = ahash_no_export; + hash->import = ahash_no_import; + if (tfm->__crt_alg->cra_type != &crypto_ahash_type) return crypto_init_shash_ops_async(tfm); hash->init = alg->init; hash->update = alg->update; - hash->final = alg->final; + hash->final = alg->final; + hash->finup = alg->finup ?: ahash_def_finup; hash->digest = alg->digest; - hash->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; + + if (alg->setkey) + hash->setkey = alg->setkey; + if (alg->export) + hash->export = alg->export; + if (alg->import) + hash->import = alg->import; return 0; } diff --git a/crypto/shash.c b/crypto/shash.c index 171c8f052f89..834d9d24cdae 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -235,6 +235,33 @@ static int shash_async_final(struct ahash_request *req) return crypto_shash_final(ahash_request_ctx(req), req->result); } +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) +{ + struct crypto_hash_walk walk; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; + nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes, + req->result) : + crypto_shash_update(desc, walk.data, nbytes); + + return nbytes; +} +EXPORT_SYMBOL_GPL(shash_ahash_finup); + +static int shash_async_finup(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + desc->flags = req->base.flags; + + return shash_ahash_finup(req, desc); +} + int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { struct scatterlist *sg = req->src; @@ -252,8 +279,7 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) crypto_yield(desc->flags); } else err = crypto_shash_init(desc) ?: - shash_ahash_update(req, desc) ?: - crypto_shash_final(desc, req->result); + shash_ahash_finup(req, desc); return err; } @@ -270,6 +296,16 @@ static int shash_async_digest(struct ahash_request *req) return shash_ahash_digest(req, desc); } +static int shash_async_export(struct ahash_request *req, void *out) +{ + return crypto_shash_export(ahash_request_ctx(req), out); +} + +static int shash_async_import(struct ahash_request *req, const void *in) +{ + return crypto_shash_import(ahash_request_ctx(req), in); +} + static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) { struct crypto_shash **ctx = crypto_tfm_ctx(tfm); @@ -280,6 +316,7 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) int crypto_init_shash_ops_async(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; + struct shash_alg *alg = __crypto_shash_alg(calg); struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; @@ -298,9 +335,16 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->init = shash_async_init; crt->update = shash_async_update; - crt->final = shash_async_final; + crt->final = shash_async_final; + crt->finup = shash_async_finup; crt->digest = shash_async_digest; - crt->setkey = shash_async_setkey; + + if (alg->setkey) + crt->setkey = shash_async_setkey; + if (alg->export) + crt->export = shash_async_export; + if (alg->import) + crt->import = shash_async_import; crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 45c2bddfdf32..3e89ce16b59c 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -31,6 +31,9 @@ struct ahash_request { struct scatterlist *src; u8 *result; + /* This field may only be used by the ahash API code.
*/ + void *priv; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -175,16 +178,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req) return req->__ctx; } -static inline int crypto_ahash_setkey(struct crypto_ahash *tfm, - const u8 *key, unsigned int keylen) -{ - return tfm->setkey(tfm, key, keylen); -} - -static inline int crypto_ahash_digest(struct ahash_request *req) -{ - return crypto_ahash_reqtfm(req)->digest(req); -} +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); +int crypto_ahash_finup(struct ahash_request *req); +int crypto_ahash_final(struct ahash_request *req); +int crypto_ahash_digest(struct ahash_request *req); static inline int crypto_ahash_export(struct ahash_request *req, void *out) { @@ -206,11 +204,6 @@ static inline int crypto_ahash_update(struct ahash_request *req) return crypto_ahash_reqtfm(req)->update(req); } -static inline int crypto_ahash_final(struct ahash_request *req) -{ - return crypto_ahash_reqtfm(req)->final(req); -} - static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 179dd8fdfa14..5bfad8c80595 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -59,6 +59,11 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, struct crypto_hash_walk *walk, struct scatterlist *sg, unsigned int len); +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + int crypto_register_ahash(struct ahash_alg *alg); int crypto_unregister_ahash(struct ahash_alg *alg); int ahash_register_instance(struct crypto_template *tmpl, @@ -94,6 +99,7 @@ static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn) struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask); int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc); +int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); int crypto_init_shash_ops_async(struct crypto_tfm *tfm); -- cgit v1.2.3 From 05ed8758fa30e088766905d0d600aa393e144353 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 15 Jul 2009 16:51:04 +0800 Subject: crypto: cryptd - Fix uninitialized return value If cryptd_alloc_instance() fails, the return value is uninitialized. This patch fixes this by setting the return value. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/cryptd.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 5dabb7dbad84..fbd26f9dd329 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -303,6 +303,7 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl, return PTR_ERR(alg); inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); + err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; @@ -522,6 +523,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, alg = &salg->base; inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), sizeof(*ctx)); + err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; -- cgit v1.2.3 From 3b3fc322d9c92e8bbfcecf739f1a3d10ded7f2cd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jul 2009 16:52:55 +0800 Subject: crypto: hmac - Fix incorrect error value when creating instance If shash_alloc_instance() fails, we return the wrong error value. This patch fixes it. 
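The cryptd fix above and the hmac and xcbc fixes below all apply the same idiom: assign PTR_ERR() unconditionally before the IS_ERR() test, so the error path returns the encoded error instead of a stale value. In sketch form:

inst = shash_alloc_instance("hmac", alg);
err = PTR_ERR(inst);		/* meaningless if inst is valid ... */
if (IS_ERR(inst))
	goto out_put_alg;	/* ... but correct when it is not */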
Signed-off-by: Herbert Xu --- crypto/hmac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index e37342748f2f..02aa53ea14aa 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -195,6 +195,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) goto out_put_alg; inst = shash_alloc_instance("hmac", alg); + err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; -- cgit v1.2.3 From b5ebd44eb7559ea6135d536bafd02323d2ef0547 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jul 2009 16:53:33 +0800 Subject: crypto: xcbc - Fix incorrect error value when creating instance If shash_alloc_instance() fails, we return the wrong error value. This patch fixes it. Signed-off-by: Herbert Xu --- crypto/xcbc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index b2cc8551b99f..3b991bf2fd92 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -252,6 +252,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) } inst = shash_alloc_instance("xcbc", alg); + err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; -- cgit v1.2.3 From a70c522520d967844c01fa01459edc698fc54544 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jul 2009 20:39:05 +0800 Subject: crypto: ahash - Fix setkey crash When the alignment check was made unconditional for ahash we may end up crashing on shash algorithms because we're always calling alg->setkey instead of tfm->setkey. This patch fixes it. Signed-off-by: Herbert Xu --- crypto/ahash.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index ac0798d2824e..28a33d06c274 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -145,7 +145,6 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc, static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - struct ahash_alg *ahash = crypto_ahash_alg(tfm); unsigned long alignmask = crypto_ahash_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; @@ -158,7 +157,7 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); - ret = ahash->setkey(tfm, alignbuffer, keylen); + ret = tfm->setkey(tfm, alignbuffer, keylen); kzfree(buffer); return ret; } @@ -166,13 +165,12 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { - struct ahash_alg *ahash = crypto_ahash_alg(tfm); unsigned long alignmask = crypto_ahash_alignmask(tfm); if ((unsigned long)key & alignmask) return ahash_setkey_unaligned(tfm, key, keylen); - return ahash->setkey(tfm, key, keylen); + return tfm->setkey(tfm, key, keylen); } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -- cgit v1.2.3 From cbc86b9161b40f95caee0e56381b68956fc28cc4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 15 Jul 2009 21:26:41 +0800 Subject: crypto: shash - Fix async finup handling of null digest When shash_ahash_finup encounters a null request, we end up not calling the underlying final function. This patch fixes that. 
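To see the bug (a sketch of the old loop shape): crypto_hash_walk_first() returns 0 for an empty message, so the loop condition fails immediately and the body, the only place finup or update is invoked, never runs:

for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
     nbytes = crypto_hash_walk_done(&walk, nbytes))
	nbytes = crypto_hash_walk_last(&walk) ?
		 crypto_shash_finup(desc, walk.data, nbytes, req->result) :
		 crypto_shash_update(desc, walk.data, nbytes);
/* nbytes == 0 up front means no finup, no final, no digest written;
 * the fix below tests that case explicitly before a do/while loop. */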
Signed-off-by: Herbert Xu --- crypto/shash.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 834d9d24cdae..7713b520bc98 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -240,12 +240,17 @@ int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) struct crypto_hash_walk walk; int nbytes; - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; - nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_hash_walk_first(req, &walk); + if (!nbytes) + return crypto_shash_final(desc, req->result); + + do { nbytes = crypto_hash_walk_last(&walk) ? crypto_shash_finup(desc, walk.data, nbytes, req->result) : crypto_shash_update(desc, walk.data, nbytes); + nbytes = crypto_hash_walk_done(&walk, nbytes); + } while (nbytes > 0); return nbytes; } -- cgit v1.2.3 From 6fba00d176ab73b15bb8e31f261582943429a92b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 11:10:22 +0800 Subject: crypto: cryptd - Add finup/export/import for hash This patch adds the finup/export/import functions to the cryptd ahash implementation. We simply invoke the underlying shash operations. Signed-off-by: Herbert Xu --- crypto/cryptd.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index fbd26f9dd329..2eb705822f2b 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -478,6 +478,29 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_final); } +static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) +{ + struct ahash_request *req = ahash_request_cast(req_async); + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) + goto out; + + err = shash_ahash_finup(req, &rctx->desc); + + req->base.complete = rctx->complete; + +out: + local_bh_disable(); + rctx->complete(&req->base, err); + local_bh_enable(); +} + +static int cryptd_hash_finup_enqueue(struct ahash_request *req) +{ + return cryptd_hash_enqueue(req, cryptd_hash_finup); +} + static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) { struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); @@ -507,6 +530,20 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req) return cryptd_hash_enqueue(req, cryptd_hash_digest); } +static int cryptd_hash_export(struct ahash_request *req, void *out) +{ + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + + return crypto_shash_export(&rctx->desc, out); +} + +static int cryptd_hash_import(struct ahash_request *req, const void *in) +{ + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + + return crypto_shash_import(&rctx->desc, in); +} + static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) { @@ -546,6 +583,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.init = cryptd_hash_init_enqueue; inst->alg.update = cryptd_hash_update_enqueue; inst->alg.final = cryptd_hash_final_enqueue; + inst->alg.finup = cryptd_hash_finup_enqueue; + inst->alg.export = cryptd_hash_export; + inst->alg.import = cryptd_hash_import; inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; -- cgit v1.2.3 From b588ef6e69bfc0944a17dc673ee166a00fa23de2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 13:04:37 +0800 
Subject: crypto: xcbc - Use crypto_xor This patch replaces the local xor function with the generic crypto_xor function. Signed-off-by: Herbert Xu --- crypto/xcbc.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 3b991bf2fd92..9d502e67a5c0 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -47,19 +47,10 @@ struct crypto_xcbc_ctx { u8 *prev; u8 *key; u8 *consts; - void (*xor)(u8 *a, const u8 *b, unsigned int bs); unsigned int keylen; unsigned int len; }; -static void xor_128(u8 *a, const u8 *b, unsigned int bs) -{ - ((u32 *)a)[0] ^= ((u32 *)b)[0]; - ((u32 *)a)[1] ^= ((u32 *)b)[1]; - ((u32 *)a)[2] ^= ((u32 *)b)[2]; - ((u32 *)a)[3] ^= ((u32 *)b)[3]; -} - static int _crypto_xcbc_digest_setkey(struct crypto_shash *parent, struct crypto_xcbc_ctx *ctx) { @@ -122,7 +113,7 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, len -= bs - ctx->len; p += bs - ctx->len; - ctx->xor(ctx->prev, ctx->odds, bs); + crypto_xor(ctx->prev, ctx->odds, bs); crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); /* clearing the length */ @@ -130,7 +121,7 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, /* encrypting the rest of data */ while (len > bs) { - ctx->xor(ctx->prev, p, bs); + crypto_xor(ctx->prev, p, bs); crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); p += bs; len -= bs; @@ -162,8 +153,8 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) crypto_cipher_encrypt_one(tfm, key2, (u8 *)(ctx->consts + bs)); - ctx->xor(ctx->prev, ctx->odds, bs); - ctx->xor(ctx->prev, key2, bs); + crypto_xor(ctx->prev, ctx->odds, bs); + crypto_xor(ctx->prev, key2, bs); _crypto_xcbc_digest_setkey(parent, ctx); crypto_cipher_encrypt_one(tfm, out, ctx->prev); @@ -184,8 +175,8 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) crypto_cipher_encrypt_one(tfm, key3, (u8 *)(ctx->consts + bs * 2)); - ctx->xor(ctx->prev, ctx->odds, bs); - ctx->xor(ctx->prev, key3, bs); + crypto_xor(ctx->prev, ctx->odds, bs); + crypto_xor(ctx->prev, key3, bs); _crypto_xcbc_digest_setkey(parent, ctx); @@ -209,7 +200,6 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) switch(bs) { case 16: - ctx->xor = xor_128; break; default: return -EINVAL; -- cgit v1.2.3 From ac95301f271f32901e4007096aa3516def49eed2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 14:37:15 +0800 Subject: crypto: xcbc - Fix shash conversion Although xcbc was converted to shash, it didn't obey the new requirement that all hash state must be stored in the descriptor rather than the transform. This patch fixes this issue and also optimises away the rekeying by precomputing K2 and K3 within setkey. 
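For context, XCBC (RFC 3566) derives three subkeys from the user key K: K1 = E_K(0x01...01), K2 = E_K(0x02...02) and K3 = E_K(0x03...03). The rewritten setkey below performs all three encryptions once, caching K2 and K3 in the tfm context's consts area and keying the child cipher with K1; in sketch form:

crypto_cipher_setkey(ctx->child, inkey, keylen);		/* child keyed with K */
crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);	/* K2 */
crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); /* K3 */
crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);		/* K1 */
crypto_cipher_setkey(ctx->child, key1, bs);		/* re-key child with K1 */

final() then XORs K2 (complete last block) or K3 (padded last block) into the chaining value before the last encryption, so no per-message rekeying is needed.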
Signed-off-by: Herbert Xu --- crypto/xcbc.c | 164 ++++++++++++++++++++++++++-------------------------------- 1 file changed, 73 insertions(+), 91 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 9d502e67a5c0..1e30b31f33c6 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -26,69 +26,67 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x02020202, 0x02020202, 0x03030303, 0x03030303, 0x03030303, 0x03030303}; + /* * +------------------------ * | * +------------------------ - * | crypto_xcbc_ctx + * | xcbc_tfm_ctx * +------------------------ - * | odds (block size) + * | consts (block size * 2) * +------------------------ - * | prev (block size) + */ +struct xcbc_tfm_ctx { + struct crypto_cipher *child; + u8 ctx[]; +}; + +/* * +------------------------ - * | key (block size) + * | * +------------------------ - * | consts (block size * 3) + * | xcbc_desc_ctx + * +------------------------ + * | odds (block size) + * +------------------------ + * | prev (block size) * +------------------------ */ -struct crypto_xcbc_ctx { - struct crypto_cipher *child; - u8 *odds; - u8 *prev; - u8 *key; - u8 *consts; - unsigned int keylen; +struct xcbc_desc_ctx { unsigned int len; + u8 ctx[]; }; -static int _crypto_xcbc_digest_setkey(struct crypto_shash *parent, - struct crypto_xcbc_ctx *ctx) +static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, + const u8 *inkey, unsigned int keylen) { + unsigned long alignmask = crypto_shash_alignmask(parent); + struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); int bs = crypto_shash_blocksize(parent); + u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); int err = 0; u8 key1[bs]; - if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) - return err; + if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) + return err; - crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); + crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); + crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); + crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); return crypto_cipher_setkey(ctx->child, key1, bs); -} - -static int crypto_xcbc_digest_setkey(struct crypto_shash *parent, - const u8 *inkey, unsigned int keylen) -{ - struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); - - if (keylen != crypto_cipher_blocksize(ctx->child)) - return -EINVAL; - ctx->keylen = keylen; - memcpy(ctx->key, inkey, keylen); - ctx->consts = (u8*)ks; - - return _crypto_xcbc_digest_setkey(parent, ctx); } static int crypto_xcbc_digest_init(struct shash_desc *pdesc) { - struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(pdesc->tfm); + unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm); + struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); int bs = crypto_shash_blocksize(pdesc->tfm); + u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; ctx->len = 0; - memset(ctx->odds, 0, bs); - memset(ctx->prev, 0, bs); + memset(prev, 0, bs); return 0; } @@ -97,39 +95,43 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; - struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); - struct crypto_cipher *tfm = ctx->child; + unsigned long alignmask = crypto_shash_alignmask(parent); + struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); + u8 *odds = 
PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *prev = odds + bs; /* checking the data can fill the block */ if ((ctx->len + len) <= bs) { - memcpy(ctx->odds + ctx->len, p, len); + memcpy(odds + ctx->len, p, len); ctx->len += len; return 0; } /* filling odds with new data and encrypting it */ - memcpy(ctx->odds + ctx->len, p, bs - ctx->len); + memcpy(odds + ctx->len, p, bs - ctx->len); len -= bs - ctx->len; p += bs - ctx->len; - crypto_xor(ctx->prev, ctx->odds, bs); - crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); + crypto_xor(prev, odds, bs); + crypto_cipher_encrypt_one(tfm, prev, prev); /* clearing the length */ ctx->len = 0; /* encrypting the rest of data */ while (len > bs) { - crypto_xor(ctx->prev, p, bs); - crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev); + crypto_xor(prev, p, bs); + crypto_cipher_encrypt_one(tfm, prev, prev); p += bs; len -= bs; } /* keeping the surplus of blocksize */ if (len) { - memcpy(ctx->odds, p, len); + memcpy(odds, p, len); ctx->len = len; } @@ -139,29 +141,20 @@ static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p, static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - struct crypto_xcbc_ctx *ctx = crypto_shash_ctx(parent); - struct crypto_cipher *tfm = ctx->child; + unsigned long alignmask = crypto_shash_alignmask(parent); + struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent); + struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); + struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_blocksize(parent); - int err = 0; + u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); + u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); + u8 *prev = odds + bs; + unsigned int offset = 0; - if (ctx->len == bs) { - u8 key2[bs]; - - if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) - return err; - - crypto_cipher_encrypt_one(tfm, key2, - (u8 *)(ctx->consts + bs)); - - crypto_xor(ctx->prev, ctx->odds, bs); - crypto_xor(ctx->prev, key2, bs); - _crypto_xcbc_digest_setkey(parent, ctx); - - crypto_cipher_encrypt_one(tfm, out, ctx->prev); - } else { - u8 key3[bs]; + if (ctx->len != bs) { unsigned int rlen; - u8 *p = ctx->odds + ctx->len; + u8 *p = odds + ctx->len; + *p = 0x80; p++; @@ -169,19 +162,13 @@ static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out) if (rlen) memset(p, 0, rlen); - if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) - return err; - - crypto_cipher_encrypt_one(tfm, key3, - (u8 *)(ctx->consts + bs * 2)); + offset += bs; + } - crypto_xor(ctx->prev, ctx->odds, bs); - crypto_xor(ctx->prev, key3, bs); + crypto_xor(prev, odds, bs); + crypto_xor(prev, consts + offset, bs); - _crypto_xcbc_digest_setkey(parent, ctx); - - crypto_cipher_encrypt_one(tfm, out, ctx->prev); - } + crypto_cipher_encrypt_one(tfm, out, prev); return 0; } @@ -191,31 +178,20 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm) struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_xcbc_ctx *ctx = crypto_tfm_ctx(tfm); - int bs = crypto_tfm_alg_blocksize(tfm); + struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); - switch(bs) { - case 16: - break; - default: - return -EINVAL; - } - ctx->child = cipher; - ctx->odds = (u8*)(ctx+1); - ctx->prev = ctx->odds + bs; - ctx->key = ctx->prev + bs; return 0; }; static void xcbc_exit_tfm(struct crypto_tfm *tfm) { - struct 
crypto_xcbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_cipher(ctx->child); } @@ -254,12 +230,18 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; + inst->alg.base.cra_alignmask = alg->cra_alignmask | 3; inst->alg.digestsize = alg->cra_blocksize; - inst->alg.base.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + - ALIGN(alg->cra_blocksize * 3, - sizeof(void *)); + inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), + crypto_tfm_ctx_alignment()) + + (alg->cra_alignmask & + ~(crypto_tfm_ctx_alignment() - 1)) + + alg->cra_blocksize * 2; + + inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), + alg->cra_alignmask) + + alg->cra_blocksize * 2; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; -- cgit v1.2.3 From 1f38ad8389bbca038d320c29d30aa1d6ed96b48d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 11:48:18 +0800 Subject: crypto: sha512 - Export struct sha512_state This patch renames struct sha512_ctx and exports it as struct sha512_state so that other sha512 implementations can use it as the reference structure for exporting their state. Signed-off-by: Herbert Xu --- crypto/sha512_generic.c | 20 +++++++------------- include/crypto/sha.h | 6 ++++++ 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'crypto') diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 3bea38d12242..4fe95eb03226 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -21,12 +21,6 @@ #include #include -struct sha512_ctx { - u64 state[8]; - u32 count[4]; - u8 buf[128]; -}; - static DEFINE_PER_CPU(u64[80], msg_schedule); static inline u64 Ch(u64 x, u64 y, u64 z) @@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input) static int sha512_init(struct shash_desc *desc) { - struct sha512_ctx *sctx = shash_desc_ctx(desc); + struct sha512_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA512_H0; sctx->state[1] = SHA512_H1; sctx->state[2] = SHA512_H2; @@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc) static int sha384_init(struct shash_desc *desc) { - struct sha512_ctx *sctx = shash_desc_ctx(desc); + struct sha512_state *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA384_H0; sctx->state[1] = SHA384_H1; sctx->state[2] = SHA384_H2; @@ -175,7 +169,7 @@ sha384_init(struct shash_desc *desc) static int sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha512_ctx *sctx = shash_desc_ctx(desc); + struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int i, index, part_len; @@ -214,7 +208,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) static int sha512_final(struct shash_desc *desc, u8 *hash) { - struct sha512_ctx *sctx = shash_desc_ctx(desc); + struct sha512_state *sctx = shash_desc_ctx(desc); static u8 padding[128] = { 0x80, }; __be64 *dst = (__be64 *)hash; __be32 bits[4]; @@ -240,7 +234,7 @@ sha512_final(struct shash_desc *desc, u8 *hash) dst[i] = cpu_to_be64(sctx->state[i]); /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(struct sha512_ctx)); + memset(sctx, 0, sizeof(struct sha512_state)); return 0; } @@ -262,7 +256,7 @@ static struct shash_alg sha512 = { .init = sha512_init, .update = sha512_update, .final = sha512_final, - .descsize = sizeof(struct sha512_ctx), + .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_flags = CRYPTO_ALG_TYPE_SHASH, @@ -276,7 +270,7 @@ static struct shash_alg sha384 = { .init = sha384_init, .update = sha512_update, .final = sha384_final, - .descsize = sizeof(struct sha512_ctx), + .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_flags = CRYPTO_ALG_TYPE_SHASH, diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 88ef5eb9514d..45b25ccf7cc6 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -76,4 +76,10 @@ struct sha256_state { u8 buf[SHA256_BLOCK_SIZE]; }; +struct sha512_state { + u64 state[8]; + u32 count[4]; + u8 buf[128]; +}; + #endif -- cgit v1.2.3 From 13887ed6888dad1608eb9530ebd83b6ba29db577 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 12:22:43 +0800 Subject: crypto: sha512_generic - Use 64-bit counters This patch replaces the 32-bit counters in sha512_generic with 64-bit counters. It also switches the bit count to the simpler byte count. Signed-off-by: Herbert Xu --- crypto/sha512_generic.c | 28 +++++++++++----------------- include/crypto/sha.h | 6 +++--- 2 files changed, 14 insertions(+), 20 deletions(-) (limited to 'crypto') diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 4fe95eb03226..9ed9f60316e5 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -144,7 +144,7 @@ sha512_init(struct shash_desc *desc) sctx->state[5] = SHA512_H5; sctx->state[6] = SHA512_H6; sctx->state[7] = SHA512_H7; - sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; + sctx->count[0] = sctx->count[1] = 0; return 0; } @@ -161,7 +161,7 @@ sha384_init(struct shash_desc *desc) sctx->state[5] = SHA384_H5; sctx->state[6] = SHA384_H6; sctx->state[7] = SHA384_H7; - sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; + sctx->count[0] = sctx->count[1] = 0; return 0; } @@ -174,15 +174,11 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) unsigned int i, index, part_len; /* Compute number of bytes mod 128 */ - index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); - - /* Update number of bits */ - if ((sctx->count[0] += (len << 3)) < (len << 3)) { - if ((sctx->count[1] += 1) < 1) - if ((sctx->count[2] += 1) < 1) - sctx->count[3]++; - sctx->count[1] += (len >> 29); - } + index = sctx->count[0] & 0x7f; + + /* Update number of bytes */ + if (!(sctx->count[0] += len)) + sctx->count[1]++; part_len = 128 - index; @@ -211,18 +207,16 @@ sha512_final(struct shash_desc *desc, u8 *hash) struct sha512_state *sctx = shash_desc_ctx(desc); static u8 padding[128] = { 0x80, }; __be64 *dst = (__be64 *)hash; - __be32 bits[4]; + __be64 bits[2]; unsigned int index, pad_len; int i; /* Save number of bits */ - bits[3] = cpu_to_be32(sctx->count[0]); - bits[2] = cpu_to_be32(sctx->count[1]); - bits[1] = cpu_to_be32(sctx->count[2]); - bits[0] = cpu_to_be32(sctx->count[3]); + bits[1] = cpu_to_be64(sctx->count[0] << 3); + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128. */ - index = (sctx->count[0] >> 3) & 0x7f; + index = sctx->count[0] & 0x7f; pad_len = (index < 112) ? 
(112 - index) : ((128+112) - index); sha512_update(desc, padding, pad_len); diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 45b25ccf7cc6..069e85ba97e1 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -77,9 +77,9 @@ struct sha256_state { }; struct sha512_state { - u64 state[8]; - u32 count[4]; - u8 buf[128]; + u64 count[2]; + u64 state[SHA512_DIGEST_SIZE / 8]; + u8 buf[SHA512_BLOCK_SIZE]; }; #endif -- cgit v1.2.3 From f592682f9fca18d336ac068a1abc8507b4a1d936 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Jul 2009 12:37:06 +0800 Subject: crypto: shash - Require all algorithms to support export/import This patch provides a default export/import function for all shash algorithms. It simply copies the descriptor context as is done by sha1_generic. This in essence means that all existing shash algorithms now support export/import. This is something that will be depended upon in implementations such as hmac. Therefore all new shash and ahash implementations must support export/import. For those that cannot obtain a partial result, padlock-sha's fallback model should be used so that a partial result is always available. Signed-off-by: Herbert Xu --- crypto/shash.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 7713b520bc98..98b7f46ca1ed 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -183,14 +183,16 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); -static int shash_no_export(struct shash_desc *desc, void *out) +static int shash_default_export(struct shash_desc *desc, void *out) { - return -ENOSYS; + memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); + return 0; } -static int shash_no_import(struct shash_desc *desc, const void *in) +static int shash_default_import(struct shash_desc *desc, const void *in) { - return -ENOSYS; + memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); + return 0; } static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, @@ -563,10 +565,11 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->finup = shash_finup_unaligned; if (!alg->digest) alg->digest = shash_digest_unaligned; - if (!alg->import) - alg->import = shash_no_import; - if (!alg->export) - alg->export = shash_no_export; + if (!alg->export) { + alg->export = shash_default_export; + alg->import = shash_default_import; + alg->statesize = alg->descsize; + } if (!alg->setkey) alg->setkey = shash_no_setkey; -- cgit v1.2.3 From 5befbd5a7e9c814d145f15b4281c88da96fb1aa9 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 24 Jul 2009 13:56:31 +0800 Subject: crypto: ahash - Use GFP_KERNEL on allocation if the request can sleep ahash_op_unaligned() and ahash_def_finup() allocate memory atomically, regardless whether the request can sleep or not. This patch changes this to use GFP_KERNEL if the request can sleep. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/ahash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 28a33d06c274..33a4ff45f842 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -223,7 +223,7 @@ static int ahash_op_unaligned(struct ahash_request *req, priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
- GFP_ATOMIC : GFP_ATOMIC); + GFP_KERNEL : GFP_ATOMIC); if (!priv) return -ENOMEM; @@ -333,7 +333,7 @@ static int ahash_def_finup(struct ahash_request *req) priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask), (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_ATOMIC : GFP_ATOMIC); + GFP_KERNEL : GFP_ATOMIC); if (!priv) return -ENOMEM;
-- cgit v1.2.3 From 0044f3eda9a778ab63c2a5eafede3803f01b0b97 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 24 Jul 2009 13:57:13 +0800 Subject: crypto: shash - Test for the algorithm's import function before exporting it crypto_init_shash_ops_async() tests for setkey and not for import before exporting the algorithm's import function to ahash. This patch fixes this. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/shash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 98b7f46ca1ed..91f7b9d83881 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -350,7 +350,7 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->setkey = shash_async_setkey; if (alg->export) crt->export = shash_async_export; - if (alg->setkey) + if (alg->import) crt->import = shash_async_import; crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
-- cgit v1.2.3 From 7b4ffcf953f091a815df081911c5e75c8a38418d Mon Sep 17 00:00:00 2001 From: Phil Carmody Date: Fri, 24 Jul 2009 13:59:17 +0800 Subject: crypto: aes - Undefined behaviour in crypto_aes_expand_key It's undefined behaviour in C to write outside the bounds of an array. The key expansion routine takes a shortcut of creating 8 words at a time, but this creates 4 additional words which don't fit in the array. As everyone is hopefully now aware, GCC is at liberty to make any assumptions and optimisations it likes in situations where it can detect that UB has occurred, up to and including nasal demons, and as the indices being accessed in the array are trivially calculable, it's rash to invite gcc to take any liberties at all. Signed-off-by: Phil Carmody Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index b8b66ec3883b..e78b7ee44a74 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); ctx->key_enc[6 * i + 11] = t; \ } while (0) -#define loop8(i) do { \ +#define loop8tophalf(i) do { \ t = ror32(t, 8); \ t = ls_box(t) ^ rco_tab[i]; \ t ^= ctx->key_enc[8 * i]; \ @@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab); ctx->key_enc[8 * i + 10] = t; \ t ^= ctx->key_enc[8 * i + 3]; \ ctx->key_enc[8 * i + 11] = t; \ +} while (0) + +#define loop8(i) do { \ + loop8tophalf(i); \ t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ ctx->key_enc[8 * i + 12] = t; \ t ^= ctx->key_enc[8 * i + 5]; \ @@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, ctx->key_enc[5] = le32_to_cpu(key[5]); ctx->key_enc[6] = le32_to_cpu(key[6]); t = ctx->key_enc[7] = le32_to_cpu(key[7]); - for (i = 0; i < 7; ++i) + for (i = 0; i < 6; ++i) loop8(i); + loop8tophalf(i); break; }
-- cgit v1.2.3 From 0b767b4df360bd442434d9d40b8a495e64202254 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 24 Jul 2009 15:18:41 +0800 Subject: crypto: hmac - Prehash ipad/opad This patch uses crypto_shash_export/crypto_shash_import to prehash ipad/opad to speed up hmac.
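Roughly speaking (an illustrative sketch, not code from this patch; the helper name prehash_pad is invented here), the setkey-time work per pad amounts to:

static int prehash_pad(struct shash_desc *desc, u8 *pad, unsigned int bs)
{
	/* Hash exactly one block of the pad, then save the partial
	 * hash state back over the pad buffer.  The buffer must hold
	 * at least statesize bytes; the template rejects underlying
	 * hashes with statesize smaller than the block size. */
	return crypto_shash_init(desc) ?:
	       crypto_shash_update(desc, pad, bs) ?:
	       crypto_shash_export(desc, pad);
}

hmac_setkey() below does this once for ipad and once for opad; hmac_init() then merely imports the saved ipad state, and hmac_final()/hmac_finup() import the saved opad state before the closing finup, so the two pad blocks are never rehashed per message.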
This is partly based on a similar patch by Steffen Klassert. Signed-off-by: Herbert Xu --- crypto/hmac.c | 108 +++++++++++++++++++++++++++++++++------------------------- 1 file changed, 62 insertions(+), 46 deletions(-) (limited to 'crypto') diff --git a/crypto/hmac.c b/crypto/hmac.c index 02aa53ea14aa..15c2eb534541 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -27,7 +27,7 @@ #include struct hmac_ctx { - struct shash_desc *desc; + struct crypto_shash *hash; }; static inline void *align_ptr(void *p, unsigned int align) @@ -38,8 +38,7 @@ static inline void *align_ptr(void *p, unsigned int align) static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) { return align_ptr(crypto_shash_ctx_aligned(tfm) + - crypto_shash_blocksize(tfm) * 2 + - crypto_shash_digestsize(tfm), + crypto_shash_statesize(tfm) * 2, crypto_tfm_ctx_alignment()); } @@ -48,28 +47,33 @@ static int hmac_setkey(struct crypto_shash *parent, { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); + int ss = crypto_shash_statesize(parent); char *ipad = crypto_shash_ctx_aligned(parent); - char *opad = ipad + bs; - char *digest = opad + bs; - struct hmac_ctx *ctx = align_ptr(digest + ds, + char *opad = ipad + ss; + struct hmac_ctx *ctx = align_ptr(opad + ss, crypto_tfm_ctx_alignment()); + struct crypto_shash *hash = ctx->hash; + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(hash)]; + } desc; unsigned int i; + desc.shash.tfm = hash; + desc.shash.flags = crypto_shash_get_flags(parent) & + CRYPTO_TFM_REQ_MAY_SLEEP; + if (keylen > bs) { int err; - ctx->desc->flags = crypto_shash_get_flags(parent) & - CRYPTO_TFM_REQ_MAY_SLEEP; - - err = crypto_shash_digest(ctx->desc, inkey, keylen, digest); + err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad); if (err) return err; - inkey = digest; keylen = ds; - } + } else + memcpy(ipad, inkey, keylen); - memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); @@ -78,24 +82,37 @@ static int hmac_setkey(struct crypto_shash *parent, opad[i] ^= 0x5c; } - return 0; + return crypto_shash_init(&desc.shash) ?: + crypto_shash_update(&desc.shash, ipad, bs) ?: + crypto_shash_export(&desc.shash, ipad) ?: + crypto_shash_init(&desc.shash) ?: + crypto_shash_update(&desc.shash, opad, bs) ?: + crypto_shash_export(&desc.shash, opad); } -static int hmac_init(struct shash_desc *pdesc) +static int hmac_export(struct shash_desc *pdesc, void *out) +{ + struct shash_desc *desc = shash_desc_ctx(pdesc); + + desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_shash_export(desc, out); +} + +static int hmac_import(struct shash_desc *pdesc, const void *in) { - struct crypto_shash *parent = pdesc->tfm; - int bs = crypto_shash_blocksize(parent); - int ds = crypto_shash_digestsize(parent); - char *ipad = crypto_shash_ctx_aligned(parent); - struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, - crypto_tfm_ctx_alignment()); struct shash_desc *desc = shash_desc_ctx(pdesc); + struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); - desc->tfm = ctx->desc->tfm; + desc->tfm = ctx->hash; desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - return crypto_shash_init(desc) ?: - crypto_shash_update(desc, ipad, bs); + return crypto_shash_import(desc, in); +} + +static int hmac_init(struct shash_desc *pdesc) +{ + return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); } static int hmac_update(struct shash_desc *pdesc, @@ -111,16 +128,16 @@ static int hmac_update(struct shash_desc *pdesc, static int hmac_final(struct 
shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; - int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + bs; - char *digest = opad + bs; + int ss = crypto_shash_statesize(parent); + char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - return crypto_shash_final(desc, digest) ?: - crypto_shash_digest(desc, opad, bs + ds, out); + return crypto_shash_final(desc, out) ?: + crypto_shash_import(desc, opad) ?: + crypto_shash_finup(desc, out, ds, out); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, @@ -128,16 +145,16 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data, { struct crypto_shash *parent = pdesc->tfm; - int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); - char *opad = crypto_shash_ctx_aligned(parent) + bs; - char *digest = opad + bs; + int ss = crypto_shash_statesize(parent); + char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; - return crypto_shash_finup(desc, data, nbytes, digest) ?: - crypto_shash_digest(desc, opad, bs + ds, out); + return crypto_shash_finup(desc, data, nbytes, out) ?: + crypto_shash_import(desc, opad) ?: + crypto_shash_finup(desc, out, ds, out); } static int hmac_init_tfm(struct crypto_tfm *tfm) @@ -155,21 +172,14 @@ static int hmac_init_tfm(struct crypto_tfm *tfm) parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); - ctx->desc = kmalloc(parent->descsize, GFP_KERNEL); - if (!ctx->desc) { - crypto_free_shash(hash); - return -ENOMEM; - } - - ctx->desc->tfm = hash; + ctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_tfm *tfm) { struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); - crypto_free_shash(ctx->desc->tfm); - kzfree(ctx->desc); + crypto_free_shash(ctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -179,6 +189,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) struct shash_alg *salg; int err; int ds; + int ss; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) @@ -190,8 +201,10 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) err = -EINVAL; ds = salg->digestsize; + ss = salg->statesize; alg = &salg->base; - if (ds > alg->cra_blocksize) + if (ds > alg->cra_blocksize || + ss < alg->cra_blocksize) goto out_put_alg; inst = shash_alloc_instance("hmac", alg); @@ -208,11 +221,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; + ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; + inst->alg.statesize = ss; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + - ALIGN(alg->cra_blocksize * 2 + ds, - crypto_tfm_ctx_alignment()); + ALIGN(ss * 2, crypto_tfm_ctx_alignment()); inst->alg.base.cra_init = hmac_init_tfm; inst->alg.base.cra_exit = hmac_exit_tfm; @@ -221,6 +235,8 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.update = hmac_update; inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; + inst->alg.export = hmac_export; + inst->alg.import = hmac_import; inst->alg.setkey = hmac_setkey; err = shash_register_instance(tmpl, inst); -- cgit v1.2.3 From 
cbdcf80d8b9486ddb699a044c6f87f25821708cb Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Wed, 5 Aug 2009 19:35:34 +1000 Subject: crypto: authenc - Convert to ahash This patch converts authenc to the new ahash interface. Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/authenc.c | 354 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 285 insertions(+), 69 deletions(-) (limited to 'crypto') diff --git a/crypto/authenc.c b/crypto/authenc.c index 2e16ce0089cb..4d6f49a5daeb 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -23,24 +23,36 @@ #include #include +typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags); + struct authenc_instance_ctx { - struct crypto_spawn auth; + struct crypto_ahash_spawn auth; struct crypto_skcipher_spawn enc; }; struct crypto_authenc_ctx { - spinlock_t auth_lock; - struct crypto_hash *auth; + unsigned int reqoff; + struct crypto_ahash *auth; struct crypto_ablkcipher *enc; }; +struct authenc_request_ctx { + unsigned int cryptlen; + struct scatterlist *sg; + struct scatterlist asg[2]; + struct scatterlist cipher[2]; + crypto_completion_t complete; + crypto_completion_t update_complete; + char tail[]; +}; + static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { unsigned int authkeylen; unsigned int enckeylen; struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct crypto_hash *auth = ctx->auth; + struct crypto_ahash *auth = ctx->auth; struct crypto_ablkcipher *enc = ctx->enc; struct rtattr *rta = (void *)key; struct crypto_authenc_key_param *param; @@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, authkeylen = keylen - enckeylen; - crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); - crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - err = crypto_hash_setkey(auth, key, authkeylen); - crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & + err = crypto_ahash_setkey(auth, key, authkeylen); + crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); if (err) @@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg, sg_mark_end(head); } -static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, - struct scatterlist *cipher, - unsigned int cryptlen) +static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + + if (err) + goto out; + + ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, + areq_ctx->cryptlen); + ahash_request_set_callback(ahreq, aead_request_flags(req) & + CRYPTO_TFM_REQ_MAY_SLEEP, + areq_ctx->complete, req); + + err = crypto_ahash_finup(ahreq); + if (err) + goto out; + + scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, + areq_ctx->cryptlen, + crypto_aead_authsize(authenc), 1); + +out: + aead_request_complete(req, err); +} + +static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct crypto_aead *authenc = 
crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + + if (err) + goto out; + + scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, + areq_ctx->cryptlen, + crypto_aead_authsize(authenc), 1); + +out: + aead_request_complete(req, err); +} + +static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, + int err) { + u8 *ihash; + unsigned int authsize; + struct ablkcipher_request *abreq; + struct aead_request *req = areq->data; struct crypto_aead *authenc = crypto_aead_reqtfm(req); struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct crypto_hash *auth = ctx->auth; - struct hash_desc desc = { - .tfm = auth, - .flags = aead_request_flags(req) & flags, - }; - u8 *hash = aead_request_ctx(req); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + + if (err) + goto out; + + ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, + areq_ctx->cryptlen); + ahash_request_set_callback(ahreq, aead_request_flags(req) & + CRYPTO_TFM_REQ_MAY_SLEEP, + areq_ctx->complete, req); + + err = crypto_ahash_finup(ahreq); + if (err) + goto out; + + authsize = crypto_aead_authsize(authenc); + ihash = ahreq->result + authsize; + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + + err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0; + if (err) + goto out; + + abreq = aead_request_ctx(req); + ablkcipher_request_set_tfm(abreq, ctx->enc); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + req->base.complete, req->base.data); + ablkcipher_request_set_crypt(abreq, req->src, req->dst, + req->cryptlen, req->iv); + + err = crypto_ablkcipher_decrypt(abreq); + +out: + aead_request_complete(req, err); +} + +static void authenc_verify_ahash_done(struct crypto_async_request *areq, + int err) +{ + u8 *ihash; + unsigned int authsize; + struct ablkcipher_request *abreq; + struct aead_request *req = areq->data; + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + + if (err) + goto out; + + authsize = crypto_aead_authsize(authenc); + ihash = ahreq->result + authsize; + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); + + err = memcmp(ihash, ahreq->result, authsize) ? 
-EBADMSG: 0; + if (err) + goto out; + + abreq = aead_request_ctx(req); + ablkcipher_request_set_tfm(abreq, ctx->enc); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + req->base.complete, req->base.data); + ablkcipher_request_set_crypt(abreq, req->src, req->dst, + req->cryptlen, req->iv); + + err = crypto_ablkcipher_decrypt(abreq); + +out: + aead_request_complete(req, err); +} + +static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_ahash *auth = ctx->auth; + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + u8 *hash = areq_ctx->tail; int err; - hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), - crypto_hash_alignmask(auth) + 1); + hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), + crypto_ahash_alignmask(auth) + 1); + + ahash_request_set_tfm(ahreq, auth); - spin_lock_bh(&ctx->auth_lock); - err = crypto_hash_init(&desc); + err = crypto_ahash_init(ahreq); if (err) - goto auth_unlock; + return ERR_PTR(err); + + ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, + areq_ctx->update_complete, req); - err = crypto_hash_update(&desc, req->assoc, req->assoclen); + err = crypto_ahash_update(ahreq); if (err) - goto auth_unlock; + return ERR_PTR(err); + + ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, + areq_ctx->cryptlen); + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, + areq_ctx->complete, req); - err = crypto_hash_update(&desc, cipher, cryptlen); + err = crypto_ahash_finup(ahreq); if (err) - goto auth_unlock; + return ERR_PTR(err); - err = crypto_hash_final(&desc, hash); -auth_unlock: - spin_unlock_bh(&ctx->auth_lock); + return hash; +} + +static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_ahash *auth = ctx->auth; + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); + u8 *hash = areq_ctx->tail; + int err; + hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), + crypto_ahash_alignmask(auth) + 1); + + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, + areq_ctx->cryptlen); + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, + areq_ctx->complete, req); + + err = crypto_ahash_digest(ahreq); if (err) return ERR_PTR(err); @@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, unsigned int flags) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct scatterlist *dst = req->dst; - struct scatterlist cipher[2]; - struct page *dstp; + struct scatterlist *assoc = req->assoc; + struct scatterlist *cipher = areq_ctx->cipher; + struct scatterlist *asg = areq_ctx->asg; unsigned int ivsize = crypto_aead_ivsize(authenc); - unsigned int cryptlen; + unsigned int cryptlen = req->cryptlen; + authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; + struct page *dstp; u8 *vdst; u8 *hash; @@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, 
sg_set_buf(cipher, iv, ivsize); authenc_chain(cipher, dst, vdst == iv + ivsize); dst = cipher; + cryptlen += ivsize; } - cryptlen = req->cryptlen + ivsize; - hash = crypto_authenc_hash(req, flags, dst, cryptlen); + if (sg_is_last(assoc)) { + authenc_ahash_fn = crypto_authenc_ahash; + sg_init_table(asg, 2); + sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); + authenc_chain(asg, dst, 0); + dst = asg; + cryptlen += req->assoclen; + } + + areq_ctx->cryptlen = cryptlen; + areq_ctx->sg = dst; + + areq_ctx->complete = authenc_geniv_ahash_done; + areq_ctx->update_complete = authenc_geniv_ahash_update_done; + + hash = authenc_ahash_fn(req, flags); if (IS_ERR(hash)) return PTR_ERR(hash); @@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) } static int crypto_authenc_verify(struct aead_request *req, - struct scatterlist *cipher, - unsigned int cryptlen) + authenc_ahash_t authenc_ahash_fn) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); u8 *ohash; u8 *ihash; unsigned int authsize; - ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, - cryptlen); + areq_ctx->complete = authenc_verify_ahash_done; + areq_ctx->complete = authenc_verify_ahash_update_done; + + ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); if (IS_ERR(ohash)) return PTR_ERR(ohash); authsize = crypto_aead_authsize(authenc); ihash = ohash + authsize; - scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); + scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, + authsize, 0); return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; } @@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, unsigned int cryptlen) { struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); struct scatterlist *src = req->src; - struct scatterlist cipher[2]; - struct page *srcp; + struct scatterlist *assoc = req->assoc; + struct scatterlist *cipher = areq_ctx->cipher; + struct scatterlist *asg = areq_ctx->asg; unsigned int ivsize = crypto_aead_ivsize(authenc); + authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb; + struct page *srcp; u8 *vsrc; srcp = sg_page(src); @@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, sg_set_buf(cipher, iv, ivsize); authenc_chain(cipher, src, vsrc == iv + ivsize); src = cipher; + cryptlen += ivsize; + } + + if (sg_is_last(assoc)) { + authenc_ahash_fn = crypto_authenc_ahash; + sg_init_table(asg, 2); + sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); + authenc_chain(asg, src, 0); + src = asg; + cryptlen += req->assoclen; } - return crypto_authenc_verify(req, src, cryptlen + ivsize); + areq_ctx->cryptlen = cryptlen; + areq_ctx->sg = src; + + return crypto_authenc_verify(req, authenc_ahash_fn); } static int crypto_authenc_decrypt(struct aead_request *req) @@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req) static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_hash *auth; + struct crypto_ahash *auth; struct crypto_ablkcipher *enc; int err; - auth = crypto_spawn_hash(&ictx->auth); + auth = crypto_spawn_ahash(&ictx->auth); if 
(IS_ERR(auth)) return PTR_ERR(auth); + ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) + + crypto_ahash_alignmask(auth), + crypto_ahash_alignmask(auth) + 1); + enc = crypto_spawn_skcipher(&ictx->enc); err = PTR_ERR(enc); if (IS_ERR(enc)) - goto err_free_hash; + goto err_free_ahash; ctx->auth = auth; ctx->enc = enc; + tfm->crt_aead.reqsize = max_t(unsigned int, - (crypto_hash_alignmask(auth) & - ~(crypto_tfm_ctx_alignment() - 1)) + - crypto_hash_digestsize(auth) * 2, - sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(enc) + - crypto_ablkcipher_ivsize(enc)); - - spin_lock_init(&ctx->auth_lock); + crypto_ahash_reqsize(auth) + ctx->reqoff + + sizeof(struct authenc_request_ctx) + + sizeof(struct ahash_request), + sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(enc) + + crypto_ablkcipher_ivsize(enc)); return 0; -err_free_hash: - crypto_free_hash(auth); +err_free_ahash: + crypto_free_ahash(auth); return err; } @@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) { struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_hash(ctx->auth); + crypto_free_ahash(ctx->auth); crypto_free_ablkcipher(ctx->enc); } @@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_instance *inst; - struct crypto_alg *auth; + struct hash_alg_common *auth; + struct crypto_alg *auth_base; struct crypto_alg *enc; struct authenc_instance_ctx *ctx; const char *enc_name; @@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); - auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK); + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_AHASH_MASK); if (IS_ERR(auth)) return ERR_PTR(PTR_ERR(auth)); + auth_base = &auth->base; + enc_name = crypto_attr_alg_name(tb[2]); err = PTR_ERR(enc_name); if (IS_ERR(enc_name)) @@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) ctx = crypto_instance_ctx(inst); - err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); + err = crypto_init_ahash_spawn(&ctx->auth, auth, inst); if (err) goto err_free_inst; @@ -419,24 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= + "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "authenc(%s,%s)", auth->cra_driver_name, + "authenc(%s,%s)", auth_base->cra_driver_name, enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_enc; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; + inst->alg.cra_priority = enc->cra_priority * + 10 + auth_base->cra_priority; inst->alg.cra_blocksize = enc->cra_blocksize; - inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; + inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask; inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; - inst->alg.cra_aead.maxauthsize = __crypto_shash_alg(auth)->digestsize; + inst->alg.cra_aead.maxauthsize = auth->digestsize; inst->alg.cra_ctxsize = 
sizeof(struct crypto_authenc_ctx); @@ -449,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; out: - crypto_mod_put(auth); + crypto_mod_put(auth_base); return inst; err_drop_enc: crypto_drop_skcipher(&ctx->enc); err_drop_auth: - crypto_drop_spawn(&ctx->auth); + crypto_drop_ahash(&ctx->auth); err_free_inst: kfree(inst); out_put_auth: @@ -468,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst) struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_skcipher(&ctx->enc); - crypto_drop_spawn(&ctx->auth); + crypto_drop_ahash(&ctx->auth); kfree(inst); } -- cgit v1.2.3 From 2cdc6899a88e2b9c6cb82ebd547bf58932d534df Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 6 Aug 2009 15:32:38 +1000 Subject: crypto: ghash - Add GHASH digest algorithm for GCM GHASH is implemented as a shash algorithm. The actual implementation is copied from gcm.c. This makes it possible to add architecture/hardware accelerated GHASH implementation. Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- crypto/Kconfig | 7 ++ crypto/Makefile | 1 + crypto/ghash-generic.c | 170 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+) create mode 100644 crypto/ghash-generic.c (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index f2002d8e5f67..0ae170e01051 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -290,6 +290,13 @@ config CRYPTO_CRC32C_INTEL gain performance compared with software implementation. Module will be crc32c-intel. +config CRYPTO_GHASH + tristate "GHASH digest algorithm" + select CRYPTO_SHASH + select CRYPTO_GF128MUL + help + GHASH is message digest algorithm for GCM (Galois/Counter Mode). + config CRYPTO_MD4 tristate "MD4 digest algorithm" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index 3c961b4d8046..c2ca721eea9d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -82,6 +82,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o obj-$(CONFIG_CRYPTO_RNG2) += krng.o obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o +obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o # # generic algorithms and the async_tx api diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c new file mode 100644 index 000000000000..be4425616931 --- /dev/null +++ b/crypto/ghash-generic.c @@ -0,0 +1,170 @@ +/* + * GHASH: digest algorithm for GCM (Galois/Counter Mode). + * + * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen + * Copyright (c) 2009 Intel Corp. + * Author: Huang Ying + * + * The algorithm implementation is copied from gcm.c. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define GHASH_BLOCK_SIZE 16 +#define GHASH_DIGEST_SIZE 16 + +struct ghash_ctx { + struct gf128mul_4k *gf128; +}; + +struct ghash_desc_ctx { + u8 buffer[GHASH_BLOCK_SIZE]; + u32 bytes; +}; + +static int ghash_init(struct shash_desc *desc) +{ + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + memset(dctx, 0, sizeof(*dctx)); + + return 0; +} + +static int ghash_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + + if (keylen != GHASH_BLOCK_SIZE) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (ctx->gf128) + gf128mul_free_4k(ctx->gf128); + ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); + if (!ctx->gf128) + return -ENOMEM; + + return 0; +} + +static int ghash_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + u8 *dst = dctx->buffer; + + if (dctx->bytes) { + int n = min(srclen, dctx->bytes); + u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); + + dctx->bytes -= n; + srclen -= n; + + while (n--) + *pos++ ^= *src++; + + if (!dctx->bytes) + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + } + + while (srclen >= GHASH_BLOCK_SIZE) { + crypto_xor(dst, src, GHASH_BLOCK_SIZE); + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + src += GHASH_BLOCK_SIZE; + srclen -= GHASH_BLOCK_SIZE; + } + + if (srclen) { + dctx->bytes = GHASH_BLOCK_SIZE - srclen; + while (srclen--) + *dst++ ^= *src++; + } + + return 0; +} + +static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) +{ + u8 *dst = dctx->buffer; + + if (dctx->bytes) { + u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); + + while (dctx->bytes--) + *tmp++ ^= 0; + + gf128mul_4k_lle((be128 *)dst, ctx->gf128); + } + + dctx->bytes = 0; +} + +static int ghash_final(struct shash_desc *desc, u8 *dst) +{ + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); + u8 *buf = dctx->buffer; + + ghash_flush(ctx, dctx); + memcpy(dst, buf, GHASH_BLOCK_SIZE); + + return 0; +} + +static void ghash_exit_tfm(struct crypto_tfm *tfm) +{ + struct ghash_ctx *ctx = crypto_tfm_ctx(tfm); + if (ctx->gf128) + gf128mul_free_4k(ctx->gf128); +} + +static struct shash_alg ghash_alg = { + .digestsize = GHASH_DIGEST_SIZE, + .init = ghash_init, + .update = ghash_update, + .final = ghash_final, + .setkey = ghash_setkey, + .descsize = sizeof(struct ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "ghash-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = GHASH_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct ghash_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list), + .cra_exit = ghash_exit_tfm, + }, +}; + +static int __init ghash_mod_init(void) +{ + return crypto_register_shash(&ghash_alg); +} + +static void __exit ghash_mod_exit(void) +{ + crypto_unregister_shash(&ghash_alg); +} + +module_init(ghash_mod_init); +module_exit(ghash_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); +MODULE_ALIAS("ghash"); -- cgit v1.2.3 From 9382d97af586a47dad312765e35c61aa7ad7fcdd Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 6 Aug 2009 15:34:26 +1000 Subject: crypto: gcm - Use GHASH digest algorithm Remove the dedicated GHASH implementation in GCM, and uses the GHASH digest 
algorithm instead. This will make GCM uses hardware accelerated GHASH implementation automatically if available. ahash instead of shash interface is used, because some hardware accelerated GHASH implementation needs asynchronous interface. Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/gcm.c | 580 ++++++++++++++++++++++++++++++++++++++++----------------- 2 files changed, 408 insertions(+), 174 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 0ae170e01051..5105cf15834f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -157,7 +157,7 @@ config CRYPTO_GCM tristate "GCM/GMAC support" select CRYPTO_CTR select CRYPTO_AEAD - select CRYPTO_GF128MUL + select CRYPTO_GHASH help Support for Galois/Counter Mode (GCM) and Galois Message Authentication Code (GMAC). Required for IPSec. diff --git a/crypto/gcm.c b/crypto/gcm.c index e70afd0c73dd..5fc3292483ef 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -11,7 +11,10 @@ #include #include #include +#include #include +#include +#include "internal.h" #include #include #include @@ -21,11 +24,12 @@ struct gcm_instance_ctx { struct crypto_skcipher_spawn ctr; + struct crypto_ahash_spawn ghash; }; struct crypto_gcm_ctx { struct crypto_ablkcipher *ctr; - struct gf128mul_4k *gf128; + struct crypto_ahash *ghash; }; struct crypto_rfc4106_ctx { @@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx { }; struct crypto_gcm_ghash_ctx { - u32 bytes; - u32 flags; - struct gf128mul_4k *gf128; - u8 buffer[16]; + unsigned int cryptlen; + struct scatterlist *src; + crypto_completion_t complete; }; struct crypto_gcm_req_priv_ctx { @@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx { u8 iauth_tag[16]; struct scatterlist src[2]; struct scatterlist dst[2]; - struct crypto_gcm_ghash_ctx ghash; - struct ablkcipher_request abreq; + struct crypto_gcm_ghash_ctx ghash_ctx; + union { + struct ahash_request ahreq; + struct ablkcipher_request abreq; + } u; }; struct crypto_gcm_setkey_result { @@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result { struct completion completion; }; +static void *gcm_zeroes; + static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { @@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } -static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags, - struct gf128mul_4k *gf128) -{ - ctx->bytes = 0; - ctx->flags = flags; - ctx->gf128 = gf128; - memset(ctx->buffer, 0, 16); -} - -static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx, - const u8 *src, unsigned int srclen) -{ - u8 *dst = ctx->buffer; - - if (ctx->bytes) { - int n = min(srclen, ctx->bytes); - u8 *pos = dst + (16 - ctx->bytes); - - ctx->bytes -= n; - srclen -= n; - - while (n--) - *pos++ ^= *src++; - - if (!ctx->bytes) - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - } - - while (srclen >= 16) { - crypto_xor(dst, src, 16); - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - src += 16; - srclen -= 16; - } - - if (srclen) { - ctx->bytes = 16 - srclen; - while (srclen--) - *dst++ ^= *src++; - } -} - -static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx, - struct scatterlist *sg, int len) -{ - struct scatter_walk walk; - u8 *src; - int n; - - if (!len) - return; - - scatterwalk_start(&walk, sg); - - while (len) { - n = scatterwalk_clamp(&walk, len); - - if (!n) { - scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } - - src 
= scatterwalk_map(&walk, 0); - - crypto_gcm_ghash_update(ctx, src, n); - len -= n; - - scatterwalk_unmap(src, 0); - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, 0, len); - if (len) - crypto_yield(ctx->flags); - } -} - -static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx) -{ - u8 *dst = ctx->buffer; - - if (ctx->bytes) { - u8 *tmp = dst + (16 - ctx->bytes); - - while (ctx->bytes--) - *tmp++ ^= 0; - - gf128mul_4k_lle((be128 *)dst, ctx->gf128); - } - - ctx->bytes = 0; -} - -static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx, - unsigned int authlen, - unsigned int cryptlen, u8 *dst) -{ - u8 *buf = ctx->buffer; - u128 lengths; - - lengths.a = cpu_to_be64(authlen * 8); - lengths.b = cpu_to_be64(cryptlen * 8); - - crypto_gcm_ghash_flush(ctx); - crypto_xor(buf, (u8 *)&lengths, 16); - gf128mul_4k_lle((be128 *)buf, ctx->gf128); - crypto_xor(dst, buf, 16); -} - static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) { struct crypto_gcm_setkey_result *result = req->data; @@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); + struct crypto_ahash *ghash = ctx->ghash; struct crypto_ablkcipher *ctr = ctx->ctr; struct { be128 hash; @@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, if (err) goto out; - if (ctx->gf128 != NULL) - gf128mul_free_4k(ctx->gf128); - - ctx->gf128 = gf128mul_init_4k_lle(&data->hash); - - if (ctx->gf128 == NULL) - err = -ENOMEM; + crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); + crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) & + CRYPTO_TFM_RES_MASK); out: kfree(data); @@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - u32 flags = req->base.tfm->crt_flags; - struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; struct scatterlist *dst; __be32 counter = cpu_to_be32(1); @@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, cryptlen + sizeof(pctx->auth_tag), req->iv); +} + +static inline unsigned int gcm_remain(unsigned int len) +{ + len &= 0xfU; + return len ? 
16 - len : 0; +} + +static void gcm_hash_len_done(struct crypto_async_request *areq, int err); +static void gcm_hash_final_done(struct crypto_async_request *areq, int err); - crypto_gcm_ghash_init(ghash, flags, ctx->gf128); +static int gcm_hash_update(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx, + crypto_completion_t complete, + struct scatterlist *src, + unsigned int len) +{ + struct ahash_request *ahreq = &pctx->u.ahreq; - crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); - crypto_gcm_ghash_flush(ghash); + ahash_request_set_callback(ahreq, aead_request_flags(req), + complete, req); + ahash_request_set_crypt(ahreq, src, NULL, len); + + return crypto_ahash_update(ahreq); } -static int crypto_gcm_hash(struct aead_request *req) +static int gcm_hash_remain(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx, + unsigned int remain, + crypto_completion_t complete) { - struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct ahash_request *ahreq = &pctx->u.ahreq; + + ahash_request_set_callback(ahreq, aead_request_flags(req), + complete, req); + sg_init_one(pctx->src, gcm_zeroes, remain); + ahash_request_set_crypt(ahreq, pctx->src, NULL, remain); + + return crypto_ahash_update(ahreq); +} + +static int gcm_hash_len(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx) +{ + struct ahash_request *ahreq = &pctx->u.ahreq; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + u128 lengths; + + lengths.a = cpu_to_be64(req->assoclen * 8); + lengths.b = cpu_to_be64(gctx->cryptlen * 8); + memcpy(pctx->iauth_tag, &lengths, 16); + sg_init_one(pctx->src, pctx->iauth_tag, 16); + ahash_request_set_callback(ahreq, aead_request_flags(req), + gcm_hash_len_done, req); + ahash_request_set_crypt(ahreq, pctx->src, + NULL, sizeof(lengths)); + + return crypto_ahash_update(ahreq); +} + +static int gcm_hash_final(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx) +{ + struct ahash_request *ahreq = &pctx->u.ahreq; + + ahash_request_set_callback(ahreq, aead_request_flags(req), + gcm_hash_final_done, req); + ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0); + + return crypto_ahash_final(ahreq); +} + +static void gcm_hash_final_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - u8 *auth_tag = pctx->auth_tag; - struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + + if (!err) + crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); - crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); - crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, - auth_tag); + gctx->complete(areq, err); +} + +static void gcm_hash_len_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + + if (!err) { + err = gcm_hash_final(req, pctx); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_hash_final_done(areq, err); +} + +static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + + if (!err) { + err = gcm_hash_len(req, pctx); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_hash_len_done(areq, err); +} + +static void gcm_hash_crypt_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = 
areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + unsigned int remain; + + if (!err) { + remain = gcm_remain(gctx->cryptlen); + BUG_ON(!remain); + err = gcm_hash_remain(req, pctx, remain, + gcm_hash_crypt_remain_done); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_hash_crypt_remain_done(areq, err); +} + +static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + crypto_completion_t complete; + unsigned int remain = 0; + + if (!err && gctx->cryptlen) { + remain = gcm_remain(gctx->cryptlen); + complete = remain ? gcm_hash_crypt_done : + gcm_hash_crypt_remain_done; + err = gcm_hash_update(req, pctx, complete, + gctx->src, gctx->cryptlen); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + if (remain) + gcm_hash_crypt_done(areq, err); + else + gcm_hash_crypt_remain_done(areq, err); +} + +static void gcm_hash_assoc_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + unsigned int remain; + + if (!err) { + remain = gcm_remain(req->assoclen); + BUG_ON(!remain); + err = gcm_hash_remain(req, pctx, remain, + gcm_hash_assoc_remain_done); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_hash_assoc_remain_done(areq, err); +} + +static void gcm_hash_init_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + crypto_completion_t complete; + unsigned int remain = 0; + + if (!err && req->assoclen) { + remain = gcm_remain(req->assoclen); + complete = remain ? gcm_hash_assoc_done : + gcm_hash_assoc_remain_done; + err = gcm_hash_update(req, pctx, complete, + req->assoc, req->assoclen); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + if (remain) + gcm_hash_assoc_done(areq, err); + else + gcm_hash_assoc_remain_done(areq, err); +} + +static int gcm_hash(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx) +{ + struct ahash_request *ahreq = &pctx->u.ahreq; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + unsigned int remain; + crypto_completion_t complete; + int err; + + ahash_request_set_tfm(ahreq, ctx->ghash); + + ahash_request_set_callback(ahreq, aead_request_flags(req), + gcm_hash_init_done, req); + err = crypto_ahash_init(ahreq); + if (err) + return err; + remain = gcm_remain(req->assoclen); + complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done; + err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen); + if (err) + return err; + if (remain) { + err = gcm_hash_remain(req, pctx, remain, + gcm_hash_assoc_remain_done); + if (err) + return err; + } + remain = gcm_remain(gctx->cryptlen); + complete = remain ? 
gcm_hash_crypt_done : gcm_hash_crypt_remain_done; + err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen); + if (err) + return err; + if (remain) { + err = gcm_hash_remain(req, pctx, remain, + gcm_hash_crypt_remain_done); + if (err) + return err; + } + err = gcm_hash_len(req, pctx); + if (err) + return err; + err = gcm_hash_final(req, pctx); + if (err) + return err; + + return 0; +} + +static void gcm_enc_copy_hash(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + u8 *auth_tag = pctx->auth_tag; scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, crypto_aead_authsize(aead), 1); - return 0; } -static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) +static void gcm_enc_hash_done(struct crypto_async_request *areq, + int err) { struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); if (!err) - err = crypto_gcm_hash(req); + gcm_enc_copy_hash(req, pctx); aead_request_complete(req, err); } +static void gcm_encrypt_done(struct crypto_async_request *areq, + int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + + if (!err) { + err = gcm_hash(req, pctx); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_enc_hash_done(areq, err); +} + static int crypto_gcm_encrypt(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->abreq; + struct ablkcipher_request *abreq = &pctx->u.abreq; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; int err; crypto_gcm_init_crypt(abreq, req, req->cryptlen); ablkcipher_request_set_callback(abreq, aead_request_flags(req), - crypto_gcm_encrypt_done, req); + gcm_encrypt_done, req); + + gctx->src = req->dst; + gctx->cryptlen = req->cryptlen; + gctx->complete = gcm_enc_hash_done; err = crypto_ablkcipher_encrypt(abreq); if (err) return err; - return crypto_gcm_hash(req); + err = gcm_hash(req, pctx); + if (err) + return err; + + crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); + gcm_enc_copy_hash(req, pctx); + + return 0; } -static int crypto_gcm_verify(struct aead_request *req) +static int crypto_gcm_verify(struct aead_request *req, + struct crypto_gcm_req_priv_ctx *pctx) { struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; u8 *auth_tag = pctx->auth_tag; u8 *iauth_tag = pctx->iauth_tag; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen - authsize; - crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); - - authsize = crypto_aead_authsize(aead); + crypto_xor(auth_tag, iauth_tag, 16); scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); return memcmp(iauth_tag, auth_tag, authsize) ? 
-EBADMSG : 0; } -static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) +static void gcm_decrypt_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); if (!err) - err = crypto_gcm_verify(req); + err = crypto_gcm_verify(req, pctx); aead_request_complete(req, err); } +static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) +{ + struct aead_request *req = areq->data; + struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); + struct ablkcipher_request *abreq = &pctx->u.abreq; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; + + if (!err) { + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + gcm_decrypt_done, req); + crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); + err = crypto_ablkcipher_decrypt(abreq); + if (err == -EINPROGRESS || err == -EBUSY) + return; + } + + gcm_decrypt_done(areq, err); +} + static int crypto_gcm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); - struct ablkcipher_request *abreq = &pctx->abreq; - struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; - unsigned int cryptlen = req->cryptlen; + struct ablkcipher_request *abreq = &pctx->u.abreq; + struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int authsize = crypto_aead_authsize(aead); + unsigned int cryptlen = req->cryptlen; int err; if (cryptlen < authsize) return -EINVAL; cryptlen -= authsize; - crypto_gcm_init_crypt(abreq, req, cryptlen); - ablkcipher_request_set_callback(abreq, aead_request_flags(req), - crypto_gcm_decrypt_done, req); + gctx->src = req->src; + gctx->cryptlen = cryptlen; + gctx->complete = gcm_dec_hash_done; - crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); + err = gcm_hash(req, pctx); + if (err) + return err; + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + gcm_decrypt_done, req); + crypto_gcm_init_crypt(abreq, req, cryptlen); err = crypto_ablkcipher_decrypt(abreq); if (err) return err; - return crypto_gcm_verify(req); + return crypto_gcm_verify(req, pctx); } static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) @@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_ablkcipher *ctr; + struct crypto_ahash *ghash; unsigned long align; int err; + ghash = crypto_spawn_ahash(&ictx->ghash); + if (IS_ERR(ghash)) + return PTR_ERR(ghash); + ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) - return err; + goto err_free_hash; ctx->ctr = ctr; - ctx->gf128 = NULL; + ctx->ghash = ghash; align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); tfm->crt_aead.reqsize = align + - sizeof(struct crypto_gcm_req_priv_ctx) + - crypto_ablkcipher_reqsize(ctr); + offsetof(struct crypto_gcm_req_priv_ctx, u) + + max(sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(ctr), + sizeof(struct ahash_request) + + crypto_ahash_reqsize(ghash)); return 0; + +err_free_hash: + crypto_free_ahash(ghash); + return err; } static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) { struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->gf128 != NULL) - gf128mul_free_4k(ctx->gf128); - + crypto_free_ahash(ctx->ghash); crypto_free_ablkcipher(ctx->ctr); } static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr 
**tb, const char *full_name, - const char *ctr_name) + const char *ctr_name, + const char *ghash_name) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *ctr; + struct crypto_alg *ghash_alg; + struct ahash_alg *ghash_ahash_alg; struct gcm_instance_ctx *ctx; int err; @@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); + ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, + CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + err = PTR_ERR(ghash_alg); + if (IS_ERR(ghash_alg)) + return ERR_PTR(err); + + err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) - return ERR_PTR(-ENOMEM); + goto out_put_ghash; ctx = crypto_instance_ctx(inst); + ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); + err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, + inst); + if (err) + goto err_free_inst; + crypto_set_skcipher_spawn(&ctx->ctr, inst); err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) - goto err_free_inst; + goto err_drop_ghash; ctr = crypto_skcipher_spawn_alg(&ctx->ctr); @@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "gcm_base(%s)", ctr->cra_driver_name) >= + "gcm_base(%s,%s)", ctr->cra_driver_name, + ghash_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; @@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; out: + crypto_mod_put(ghash_alg); return inst; out_put_ctr: crypto_drop_skcipher(&ctx->ctr); +err_drop_ghash: + crypto_drop_ahash(&ctx->ghash); err_free_inst: kfree(inst); +out_put_ghash: inst = ERR_PTR(err); goto out; } @@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) CRYPTO_MAX_ALG_NAME) return ERR_PTR(-ENAMETOOLONG); - return crypto_gcm_alloc_common(tb, full_name, ctr_name); + return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); } static void crypto_gcm_free(struct crypto_instance *inst) @@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst) struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_skcipher(&ctx->ctr); + crypto_drop_ahash(&ctx->ghash); kfree(inst); } @@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) { int err; const char *ctr_name; + const char *ghash_name; char full_name[CRYPTO_MAX_ALG_NAME]; ctr_name = crypto_attr_alg_name(tb[1]); @@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) if (IS_ERR(ctr_name)) return ERR_PTR(err); - if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", - ctr_name) >= CRYPTO_MAX_ALG_NAME) + ghash_name = crypto_attr_alg_name(tb[2]); + err = PTR_ERR(ghash_name); + if (IS_ERR(ghash_name)) + return ERR_PTR(err); + + if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", + ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-ENAMETOOLONG); - return crypto_gcm_alloc_common(tb, full_name, ctr_name); + return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); } static struct crypto_template crypto_gcm_base_tmpl = { @@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void) { int err; + gcm_zeroes = kzalloc(16, GFP_KERNEL); + 
if (!gcm_zeroes) + return -ENOMEM; + err = crypto_register_template(&crypto_gcm_base_tmpl); if (err) goto out; @@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void) if (err) goto out_undo_gcm; -out: - return err; + return 0; out_undo_gcm: crypto_unregister_template(&crypto_gcm_tmpl); out_undo_base: crypto_unregister_template(&crypto_gcm_base_tmpl); +out: + kfree(gcm_zeroes); + return err; } static void __exit crypto_gcm_module_exit(void) { + kfree(gcm_zeroes); crypto_unregister_template(&crypto_rfc4106_tmpl); crypto_unregister_template(&crypto_gcm_tmpl); crypto_unregister_template(&crypto_gcm_base_tmpl); - goto out; -- cgit v1.2.3
From ace1366369841c9c3a9788f79baa4d73f1c53107 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 6 Aug 2009 15:35:20 +1000 Subject: crypto: cryptd - Add support to access underlying shash cryptd_alloc_ahash() allocates a cryptd-ed ahash for the specified algorithm name. The newly allocated tfm is guaranteed to be a cryptd-ed ahash, so the underlying shash can be obtained via cryptd_ahash_child(). Signed-off-by: Huang Ying Signed-off-by: Herbert Xu --- crypto/cryptd.c | 35 +++++++++++++++++++++++++++++++++++ include/crypto/cryptd.h | 17 +++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'crypto') diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 2eb705822f2b..35335825a4ef 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -682,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) } EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask) +{ + char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; + struct crypto_ahash *tfm; + + if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, + "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) + return ERR_PTR(-EINVAL); + tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { + crypto_free_ahash(tfm); + return ERR_PTR(-EINVAL); + } + + return __cryptd_ahash_cast(tfm); +} +EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); + +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) +{ + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); + + return ctx->child; +} +EXPORT_SYMBOL_GPL(cryptd_ahash_child); + +void cryptd_free_ahash(struct cryptd_ahash *tfm) +{ + crypto_free_ahash(&tfm->base); +} +EXPORT_SYMBOL_GPL(cryptd_free_ahash); + static int __init cryptd_init(void) { int err; diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 55fa7bbdbc71..2f65a6e8ea4d 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -7,6 +7,7 @@ #include #include +#include struct cryptd_ablkcipher { struct crypto_ablkcipher base; @@ -24,4 +25,20 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); +struct cryptd_ahash { + struct crypto_ahash base; +}; + +static inline struct cryptd_ahash *__cryptd_ahash_cast( + struct crypto_ahash *tfm) +{ + return (struct cryptd_ahash *)tfm; +} + +/* alg_name should be the algorithm to be cryptd-ed */ +struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, + u32 type, u32 mask); +struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); +void cryptd_free_ahash(struct cryptd_ahash *tfm); + #endif -- cgit v1.2.3
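A minimal usage sketch of the new interface (editor's illustration, not part of the commit): allocate a cryptd-wrapped ahash, then retrieve the underlying shash. "sha1" is only an assumed example of an algorithm with a shash implementation, and error handling is trimmed.

#include <crypto/cryptd.h>
#include <linux/err.h>

static int cryptd_ahash_demo(void)
{
	struct cryptd_ahash *ctfm;
	struct crypto_shash *child;

	/* The helper builds the "cryptd(sha1)" name internally. */
	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* Safe: cryptd_alloc_ahash() guarantees the tfm is cryptd-ed. */
	child = cryptd_ahash_child(ctfm);
	(void)child;	/* e.g. hand the shash to synchronous-only code */

	cryptd_free_ahash(ctfm);
	return 0;
}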
From a8ccc393ddcdff2e0288dc459f03d561ae27b51d Mon Sep 17 00:00:00 2001 From: Christian Kujau Date: Thu, 13 Aug 2009 11:53:56 +1000 Subject: crypto: rng - Fix typo Correct a typo in crypto/rng.c Signed-off-by: Christian Kujau Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/rng.c b/crypto/rng.c index 6e94bc735578..ba05e7380e76 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -123,4 +123,4 @@ void crypto_put_default_rng(void) EXPORT_SYMBOL_GPL(crypto_put_default_rng); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Random Number Genertor"); +MODULE_DESCRIPTION("Random Number Generator"); -- cgit v1.2.3
From 73fec1209433d6b33924c1b06122ee217ce98931 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Aug 2009 21:33:06 +1000 Subject: Revert crypto: fips - Select CPRNG This reverts commit 215ccd6f55a2144bd553e0a3d12e1386f02309fd. It causes CPRNG and everything selected by it to be built-in whenever FIPS is enabled. The problem is that it is selecting a tristate from a bool, which is usually not what is intended. Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 5105cf15834f..1db09958eb71 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -23,7 +23,6 @@ comment "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" - select CRYPTO_ANSI_CPRNG help This options enables the fips boot option which is required if you want to system to operate in a FIPS 200 -- cgit v1.2.3
From aef27136b8b5e526f2e96ca1caa30a6d07e70f42 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 13 Aug 2009 23:10:39 +1000 Subject: crypto: ctr - Use chainiv on raw counter mode Raw counter mode only works with chainiv, which is no longer the default IV generator on SMP machines. This broke raw counter mode as it can no longer instantiate as a givcipher. This patch fixes it by always picking chainiv on raw counter mode. This is based on the diagnosis and a patch by Huang Ying. Signed-off-by: Herbert Xu --- crypto/ctr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'crypto') diff --git a/crypto/ctr.c b/crypto/ctr.c index 2d7425f0e7b8..6c3bfabb9d1d 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; + inst->alg.cra_blkcipher.geniv = "chainiv"; + out: crypto_mod_put(alg); return inst; -- cgit v1.2.3
From 63b5ac286d5d7f668da537cc53a552578f7674a2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 14 Aug 2009 22:55:35 +1000 Subject: crypto: blkcipher - Do not use eseqiv on stream ciphers Recently we switched to using eseqiv on SMP machines in preference over chainiv. However, eseqiv does not support stream ciphers so they should still default to chainiv. This patch applies the same check as done by eseqiv to weed out the stream ciphers. In particular, all algorithms where the IV size is not equal to the block size will now default to chainiv.
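The rule, restated as a compilable sketch (editor's illustration, not the kernel function itself; per the commits above, skcipher_default_geniv resolves to "eseqiv" on SMP builds and "chainiv" otherwise):

#include <stdbool.h>

static const char *default_geniv_rule(bool is_async, bool smp,
				      unsigned int ivsize,
				      unsigned int blocksize)
{
	/* eseqiv derives the IV by encrypting a block, so it assumes
	 * ivsize == blocksize; stream-cipher-like algorithms fail that
	 * test and must keep chainiv. */
	if (ivsize != blocksize)
		return "chainiv";

	if (is_async)
		return "eseqiv";

	return smp ? "eseqiv" : "chainiv";	/* skcipher_default_geniv */
}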
Signed-off-by: Herbert Xu --- crypto/ablkcipher.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'crypto') diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 03fb5facf0b4..f6f08336df5d 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -183,6 +183,12 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type); const char *crypto_default_geniv(const struct crypto_alg *alg) { + if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize : + alg->cra_ablkcipher.ivsize) != + alg->cra_blocksize) + return "chainiv"; + + return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : skcipher_default_geniv; } -- cgit v1.2.3
From 4e4ed83be6a64c8c9fe69c77f37a26bb62ed21f7 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Thu, 20 Aug 2009 17:54:16 +1000 Subject: crypto: fips - Depend on ansi_cprng What about something like this? It defaults the CPRNG to m and makes FIPS dependent on the CPRNG. That way you get a module build by default, but you can change it to y manually during config and still satisfy the dependency, and if you select N it disables FIPS as well. I rather like that better than making FIPS a tristate. I just tested it out here and it seems to work well. Let me know what you think. Signed-off-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 1db09958eb71..762344202725 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -23,11 +23,13 @@ comment "Crypto core or helper" config CRYPTO_FIPS bool "FIPS 200 compliance" + depends on CRYPTO_ANSI_CPRNG help This options enables the fips boot option which is required if you want to system to operate in a FIPS 200 certification. You should say no unless you know what - this is. + this is. Note that CRYPTO_ANSI_CPRNG is required if this + option is selected config CRYPTO_ALGAPI tristate @@ -787,12 +789,14 @@ comment "Random Number Generation" config CRYPTO_ANSI_CPRNG tristate "Pseudo Random Number Generation for Cryptographic modules" + default m select CRYPTO_AES select CRYPTO_RNG help This option enables the generic pseudo random number generator for cryptographic modules. Uses the Algorithm specified in - ANSI X9.31 A.2.4 + ANSI X9.31 A.2.4. Note this option must be enabled if CRYPTO_FIPS + is selected source "drivers/crypto/Kconfig" -- cgit v1.2.3
From 36f87a4a29cb8cd291169483079fde34bad4ef16 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Thu, 20 Aug 2009 17:58:04 +1000 Subject: crypto: xcbc - Fix alignment calculation of xcbc_tfm_ctx The alignment calculation of xcbc_tfm_ctx uses alg->cra_alignmask and not alg->cra_alignmask + 1 as it should. This led to frequent crashes during the selftest of xcbc(aes-asm) on x86_64 machines. This patch fixes this. Also we use the alignmask of xcbc and not the alignmask of the underlying algorithm for the alignment calculation in xcbc_create now.
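Why the "+ 1" matters: the kernel's ALIGN() expects the alignment itself, a power of two, not the mask. A small userspace demonstration (editor's sketch; the context size 97 is an arbitrary stand-in for sizeof(struct xcbc_tfm_ctx)):

#include <stdio.h>

/* ALIGN() as the kernel defines it: round x up to a multiple of a. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long alignmask = 15 | 3;	/* assumed underlying mask, ORed with 3 as xcbc does */
	unsigned long ctxsize = 97;

	/* Pre-patch bug: the mask (15) is not a power of two, so the
	 * "aligned" result can even stay odd (here: 97). */
	printf("ALIGN(ctx, mask)     = %lu\n", ALIGN(ctxsize, alignmask));

	/* Post-patch: mask + 1 (16) is the real alignment (here: 112). */
	printf("ALIGN(ctx, mask + 1) = %lu\n", ALIGN(ctxsize, alignmask + 1));
	return 0;
}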
Signed-off-by: Steffen Klassert Signed-off-by: Herbert Xu --- crypto/xcbc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 1e30b31f33c6..bb7b67fba349 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -199,6 +199,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; struct crypto_alg *alg; + unsigned long alignmask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); @@ -228,19 +229,20 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb) if (err) goto out_free_inst; + alignmask = alg->cra_alignmask | 3; + inst->alg.base.cra_alignmask = alignmask; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask | 3; inst->alg.digestsize = alg->cra_blocksize; inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx), crypto_tfm_ctx_alignment()) + - (alg->cra_alignmask & + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)) + alg->cra_blocksize * 2; inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx), - alg->cra_alignmask) + + alignmask + 1) + alg->cra_blocksize * 2; inst->alg.base.cra_init = xcbc_init_tfm; inst->alg.base.cra_exit = xcbc_exit_tfm; -- cgit v1.2.3
From a367b17f34e1280270a6b577c11d5ecff093f9ae Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Sat, 29 Aug 2009 17:36:25 +1000 Subject: crypto: ansi_cprng - Fix module initialization Return the value we got from crypto_register_alg() instead of returning 0 in any case. Signed-off-by: Steffen Klassert Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 5357ba7d821a..3aa6e3834bfe 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -408,17 +408,10 @@ static struct crypto_alg rng_alg = { /* Module initalization */ static int __init prng_mod_init(void) { - int ret = 0; - if (fips_enabled) rng_alg.cra_priority += 200; - ret = crypto_register_alg(&rng_alg); - - if (ret) - goto out; -out: - return 0; + return crypto_register_alg(&rng_alg); } static void __exit prng_mod_fini(void) -- cgit v1.2.3
From 2bf2901669a564b402cd0e40eb3f941c391e64c4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 31 Aug 2009 15:56:54 +1000 Subject: crypto: api - Do not displace newly registered algorithms We have a mechanism where newly registered algorithms of a higher priority can displace existing instances that use a different implementation of the same algorithm with a lower priority. Unfortunately the same mechanism can cause a newly registered algorithm to displace itself if it depends on an existing version of the same algorithm. This patch fixes this by keeping all algorithms that the newly registered algorithm depends on, thus protecting them from being removed.
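The fixed rule, reduced to a toy userspace model (editor's sketch: the table, names, and priorities are invented, and the kernel's real spawn-graph walk below is far more involved). Pre-fix, the lower-priority "cbc(aes)" would be removed even though the newcomer is built on it, tearing down the newcomer's own dependency; the depends_on() check spares it.

#include <stdio.h>
#include <string.h>

struct alg {
	const char *name;	/* user-visible name, e.g. "cbc(aes)" */
	int prio;
	int deps[4];		/* indices of algorithms used, -1 terminated */
	int live;
};

static struct alg algs[] = {
	{ "aes",      100, { -1 },    1 },	/* generic AES */
	{ "cbc(aes)", 100, { 0, -1 }, 1 },	/* instance built on it */
	{ "cbc(aes)", 300, { 1, -1 }, 1 },	/* newcomer wrapping the above */
};

static int depends_on(int a, int b)	/* does a transitively use b? */
{
	for (int i = 0; algs[a].deps[i] != -1; i++)
		if (algs[a].deps[i] == b || depends_on(algs[a].deps[i], b))
			return 1;
	return 0;
}

int main(void)
{
	int newcomer = 2;

	for (int q = 0; q < newcomer; q++) {
		if (strcmp(algs[q].name, algs[newcomer].name) ||
		    algs[q].prio >= algs[newcomer].prio)
			continue;
		if (depends_on(newcomer, q)) {
			/* the fix: spare what the newcomer depends on */
			printf("keep   %s (prio %d)\n", algs[q].name, algs[q].prio);
			continue;
		}
		algs[q].live = 0;
		printf("remove %s (prio %d)\n", algs[q].name, algs[q].prio);
	}
	return 0;
}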
Signed-off-by: Herbert Xu --- crypto/algapi.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 63 insertions(+), 14 deletions(-) (limited to 'crypto') diff --git a/crypto/algapi.c b/crypto/algapi.c index 6a98076d9d2a..feb77e4bbb48 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg) crypto_tmpl_put(tmpl); } +static struct list_head *crypto_more_spawns(struct crypto_alg *alg, + struct list_head *stack, + struct list_head *top, + struct list_head *secondary_spawns) +{ + struct crypto_spawn *spawn, *n; + + if (list_empty(stack)) + return NULL; + + spawn = list_first_entry(stack, struct crypto_spawn, list); + n = list_entry(spawn->list.next, struct crypto_spawn, list); + + if (spawn->alg && &n->list != stack && !n->alg) + n->alg = (n->list.next == stack) ? alg : + &list_entry(n->list.next, struct crypto_spawn, + list)->inst->alg; + + list_move(&spawn->list, secondary_spawns); + + return &n->list == stack ? top : &n->inst->alg.cra_users; +} + static void crypto_remove_spawn(struct crypto_spawn *spawn, - struct list_head *list, - struct list_head *secondary_spawns) + struct list_head *list) { struct crypto_instance *inst = spawn->inst; struct crypto_template *tmpl = inst->tmpl; - list_del_init(&spawn->list); - spawn->alg = NULL; - if (crypto_is_dead(&inst->alg)) return; @@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn, hlist_del(&inst->list); inst->alg.cra_destroy = crypto_destroy_instance; - list_splice(&inst->alg.cra_users, secondary_spawns); + BUG_ON(!list_empty(&inst->alg.cra_users)); } -static void crypto_remove_spawns(struct list_head *spawns, - struct list_head *list, u32 new_type) +static void crypto_remove_spawns(struct crypto_alg *alg, + struct list_head *list, + struct crypto_alg *nalg) { + u32 new_type = (nalg ?: alg)->cra_flags; struct crypto_spawn *spawn, *n; LIST_HEAD(secondary_spawns); + struct list_head *spawns; + LIST_HEAD(stack); + LIST_HEAD(top); + spawns = &alg->cra_users; list_for_each_entry_safe(spawn, n, spawns, list) { if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) continue; - crypto_remove_spawn(spawn, list, &secondary_spawns); + list_move(&spawn->list, &top); } - while (!list_empty(&secondary_spawns)) { - list_for_each_entry_safe(spawn, n, &secondary_spawns, list) - crypto_remove_spawn(spawn, list, &secondary_spawns); + spawns = ⊤ + do { + while (!list_empty(spawns)) { + struct crypto_instance *inst; + + spawn = list_first_entry(spawns, struct crypto_spawn, + list); + inst = spawn->inst; + + BUG_ON(&inst->alg == alg); + + list_move(&spawn->list, &stack); + + if (&inst->alg == nalg) + break; + + spawn->alg = NULL; + spawns = &inst->alg.cra_users; + } + } while ((spawns = crypto_more_spawns(alg, &stack, &top, + &secondary_spawns))); + + list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { + if (spawn->alg) + list_move(&spawn->list, &spawn->alg->cra_users); + else + crypto_remove_spawn(spawn, list); } } @@ -258,7 +307,7 @@ found: q->cra_priority > alg->cra_priority) continue; - crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags); + crypto_remove_spawns(q, &list, alg); } complete: @@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); list_del_init(&alg->cra_list); - crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); + crypto_remove_spawns(alg, list, NULL); return 0; } -- cgit v1.2.3 From 
f1939f7c56456d22a559d2c75156e91912a2e97e Mon Sep 17 00:00:00 2001 From: Shane Wang Date: Wed, 2 Sep 2009 20:05:22 +1000 Subject: crypto: vmac - New hash algorithm for intel_txt support This patch adds VMAC (a fast MAC) support into the crypto framework. Signed-off-by: Shane Wang Signed-off-by: Joseph Cihula Signed-off-by: Herbert Xu --- crypto/Kconfig | 12 + crypto/Makefile | 1 + crypto/tcrypt.c | 4 + crypto/testmgr.c | 9 + crypto/testmgr.h | 16 ++ crypto/vmac.c | 678 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/crypto/vmac.h | 61 +++++ 7 files changed, 781 insertions(+) create mode 100644 crypto/vmac.c create mode 100644 include/crypto/vmac.h (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 762344202725..26b5dd0cb564 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -269,6 +269,18 @@ config CRYPTO_XCBC http://csrc.nist.gov/encryption/modes/proposedmodes/ xcbc-mac/xcbc-mac-spec.pdf +config CRYPTO_VMAC + tristate "VMAC support" + depends on EXPERIMENTAL + select CRYPTO_HASH + select CRYPTO_MANAGER + help + VMAC is a message authentication algorithm designed for + very high speed on 64-bit architectures. + + See also: + + comment "Digest" config CRYPTO_CRC32C diff --git a/crypto/Makefile b/crypto/Makefile index c2ca721eea9d..9e8f61908cb5 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -32,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o +obj-$(CONFIG_CRYPTO_VMAC) += vmac.o obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 5a375e819d5d..aa3f84ccc786 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -719,6 +719,10 @@ static int do_test(int m) ret += tcrypt_test("hmac(rmd160)"); break; + case 109: + ret += tcrypt_test("vmac(aes)"); + break; + case 150: ret += tcrypt_test("ansi_cprng"); break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 29b228d9b1a2..6d5b746637be 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2247,6 +2247,15 @@ static const struct alg_test_desc alg_test_descs[] = { .count = TGR192_TEST_VECTORS } } + }, { + .alg = "vmac(aes)", + .test = alg_test_hash, + .suite = { + .hash = { + .vecs = aes_vmac128_tv_template, + .count = VMAC_AES_TEST_VECTORS + } + } }, { .alg = "wp256", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 69316228fc19..9963b18983ab 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = { } }; +#define VMAC_AES_TEST_VECTORS 1 +static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01', + '\x02', '\x03', '\x02', '\x02', + '\x02', '\x04', '\x01', '\x07', + '\x04', '\x01', '\x04', '\x03',}; +static struct hash_testvec aes_vmac128_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .plaintext = vmac_string, + .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7", + .psize = 128, + .ksize = 16, + }, +}; + /* * SHA384 HMAC test vectors from RFC4231 */ diff --git a/crypto/vmac.c b/crypto/vmac.c new file mode 100644 index 000000000000..0a9468e575de --- /dev/null +++ b/crypto/vmac.c @@ -0,0 +1,678 @@ +/* + * Modified to interface to the Linux kernel + * Copyright (c) 2009, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +/* -------------------------------------------------------------------------- + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. + * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Please send bug reports to the authors. + * Last modified: 17 APR 08, 1700 PDT + * ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Constants and masks + */ +#define UINT64_C(x) x##ULL +const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */ +const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */ +const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */ +const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */ +const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */ + +#ifdef __LITTLE_ENDIAN +#define INDEX_HIGH 1 +#define INDEX_LOW 0 +#else +#define INDEX_HIGH 0 +#define INDEX_LOW 1 +#endif + +/* + * The following routines are used in this implementation. They are + * written via macros to simulate zero-overhead call-by-reference. + * + * MUL64: 64x64->128-bit multiplication + * PMUL64: assumes top bits cleared on inputs + * ADD128: 128x128->128-bit addition + */ + +#define ADD128(rh, rl, ih, il) \ + do { \ + u64 _il = (il); \ + (rl) += (_il); \ + if ((rl) < (_il)) \ + (rh)++; \ + (rh) += (ih); \ + } while (0) + +#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2)) + +#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \ + do { \ + u64 _i1 = (i1), _i2 = (i2); \ + u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \ + rh = MUL32(_i1>>32, _i2>>32); \ + rl = MUL32(_i1, _i2); \ + ADD128(rh, rl, (m >> 32), (m << 32)); \ + } while (0) + +#define MUL64(rh, rl, i1, i2) \ + do { \ + u64 _i1 = (i1), _i2 = (i2); \ + u64 m1 = MUL32(_i1, _i2>>32); \ + u64 m2 = MUL32(_i1>>32, _i2); \ + rh = MUL32(_i1>>32, _i2>>32); \ + rl = MUL32(_i1, _i2); \ + ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \ + ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \ + } while (0) + +/* + * For highest performance the L1 NH and L2 polynomial hashes should be + * carefully implemented to take advantage of one's target architechture. + * Here these two hash functions are defined multiple time; once for + * 64-bit architectures, once for 32-bit SSE2 architectures, and once + * for the rest (32-bit) architectures. + * For each, nh_16 *must* be defined (works on multiples of 16 bytes). + * Optionally, nh_vmac_nhbytes can be defined (for multiples of + * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two + * NH computations at once). 
+ */ + +#ifdef CONFIG_64BIT + +#define nh_16(mp, kp, nw, rh, rl) \ + do { \ + int i; u64 th, tl; \ + rh = rl = 0; \ + for (i = 0; i < nw; i += 2) { \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ + le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + ADD128(rh, rl, th, tl); \ + } \ + } while (0) + +#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \ + do { \ + int i; u64 th, tl; \ + rh1 = rl1 = rh = rl = 0; \ + for (i = 0; i < nw; i += 2) { \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ + le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ + le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + ADD128(rh1, rl1, th, tl); \ + } \ + } while (0) + +#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */ +#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ + do { \ + int i; u64 th, tl; \ + rh = rl = 0; \ + for (i = 0; i < nw; i += 8) { \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ + le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ + le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ + le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ + le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + ADD128(rh, rl, th, tl); \ + } \ + } while (0) + +#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \ + do { \ + int i; u64 th, tl; \ + rh1 = rl1 = rh = rl = 0; \ + for (i = 0; i < nw; i += 8) { \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \ + le64_to_cpup((mp)+i+1)+(kp)[i+1]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \ + le64_to_cpup((mp)+i+1)+(kp)[i+3]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \ + le64_to_cpup((mp)+i+3)+(kp)[i+3]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \ + le64_to_cpup((mp)+i+3)+(kp)[i+5]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \ + le64_to_cpup((mp)+i+5)+(kp)[i+5]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \ + le64_to_cpup((mp)+i+5)+(kp)[i+7]); \ + ADD128(rh1, rl1, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \ + le64_to_cpup((mp)+i+7)+(kp)[i+7]); \ + ADD128(rh, rl, th, tl); \ + MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \ + le64_to_cpup((mp)+i+7)+(kp)[i+9]); \ + ADD128(rh1, rl1, th, tl); \ + } \ + } while (0) +#endif + +#define poly_step(ah, al, kh, kl, mh, ml) \ + do { \ + u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \ + /* compute ab*cd, put bd into result registers */ \ + PMUL64(t3h, t3l, al, kh); \ + PMUL64(t2h, t2l, ah, kl); \ + PMUL64(t1h, t1l, ah, 2*kh); \ + PMUL64(ah, al, al, kl); \ + /* add 2 * ac to result */ \ + ADD128(ah, al, t1h, t1l); \ + /* add together ad + bc */ \ + ADD128(t2h, t2l, t3h, t3l); \ + /* now (ah,al), (t2l,2*t2h) need summing */ \ + /* first add the high registers, carrying into t2h */ \ + ADD128(t2h, ah, z, t2l); \ + /* double t2h and add top bit of ah */ \ + t2h = 2 * t2h + (ah >> 63); \ + ah &= m63; \ + /* now add the low registers */ \ + ADD128(ah, al, mh, ml); \ + ADD128(ah, al, z, t2h); \ + } while (0) + +#else /* ! 
CONFIG_64BIT */ + +#ifndef nh_16 +#define nh_16(mp, kp, nw, rh, rl) \ + do { \ + u64 t1, t2, m1, m2, t; \ + int i; \ + rh = rl = t = 0; \ + for (i = 0; i < nw; i += 2) { \ + t1 = le64_to_cpup(mp+i) + kp[i]; \ + t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \ + m2 = MUL32(t1 >> 32, t2); \ + m1 = MUL32(t1, t2 >> 32); \ + ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \ + MUL32(t1, t2)); \ + rh += (u64)(u32)(m1 >> 32) \ + + (u32)(m2 >> 32); \ + t += (u64)(u32)m1 + (u32)m2; \ + } \ + ADD128(rh, rl, (t >> 32), (t << 32)); \ + } while (0) +#endif + +static void poly_step_func(u64 *ahi, u64 *alo, + const u64 *kh, const u64 *kl, + const u64 *mh, const u64 *ml) +{ +#define a0 (*(((u32 *)alo)+INDEX_LOW)) +#define a1 (*(((u32 *)alo)+INDEX_HIGH)) +#define a2 (*(((u32 *)ahi)+INDEX_LOW)) +#define a3 (*(((u32 *)ahi)+INDEX_HIGH)) +#define k0 (*(((u32 *)kl)+INDEX_LOW)) +#define k1 (*(((u32 *)kl)+INDEX_HIGH)) +#define k2 (*(((u32 *)kh)+INDEX_LOW)) +#define k3 (*(((u32 *)kh)+INDEX_HIGH)) + + u64 p, q, t; + u32 t2; + + p = MUL32(a3, k3); + p += p; + p += *(u64 *)mh; + p += MUL32(a0, k2); + p += MUL32(a1, k1); + p += MUL32(a2, k0); + t = (u32)(p); + p >>= 32; + p += MUL32(a0, k3); + p += MUL32(a1, k2); + p += MUL32(a2, k1); + p += MUL32(a3, k0); + t |= ((u64)((u32)p & 0x7fffffff)) << 32; + p >>= 31; + p += (u64)(((u32 *)ml)[INDEX_LOW]); + p += MUL32(a0, k0); + q = MUL32(a1, k3); + q += MUL32(a2, k2); + q += MUL32(a3, k1); + q += q; + p += q; + t2 = (u32)(p); + p >>= 32; + p += (u64)(((u32 *)ml)[INDEX_HIGH]); + p += MUL32(a0, k1); + p += MUL32(a1, k0); + q = MUL32(a2, k3); + q += MUL32(a3, k2); + q += q; + p += q; + *(u64 *)(alo) = (p << 32) | t2; + p >>= 32; + *(u64 *)(ahi) = p + t; + +#undef a0 +#undef a1 +#undef a2 +#undef a3 +#undef k0 +#undef k1 +#undef k2 +#undef k3 +} + +#define poly_step(ah, al, kh, kl, mh, ml) \ + poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml)) + +#endif /* end of specialized NH and poly definitions */ + +/* At least nh_16 is defined. 
Defined others as needed here */ +#ifndef nh_16_2 +#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \ + do { \ + nh_16(mp, kp, nw, rh, rl); \ + nh_16(mp, ((kp)+2), nw, rh2, rl2); \ + } while (0) +#endif +#ifndef nh_vmac_nhbytes +#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \ + nh_16(mp, kp, nw, rh, rl) +#endif +#ifndef nh_vmac_nhbytes_2 +#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \ + do { \ + nh_vmac_nhbytes(mp, kp, nw, rh, rl); \ + nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \ + } while (0) +#endif + +static void vhash_abort(struct vmac_ctx *ctx) +{ + ctx->polytmp[0] = ctx->polykey[0] ; + ctx->polytmp[1] = ctx->polykey[1] ; + ctx->first_block_processed = 0; +} + +static u64 l3hash(u64 p1, u64 p2, + u64 k1, u64 k2, u64 len) +{ + u64 rh, rl, t, z = 0; + + /* fully reduce (p1,p2)+(len,0) mod p127 */ + t = p1 >> 63; + p1 &= m63; + ADD128(p1, p2, len, t); + /* At this point, (p1,p2) is at most 2^127+(len<<64) */ + t = (p1 > m63) + ((p1 == m63) && (p2 == m64)); + ADD128(p1, p2, z, t); + p1 &= m63; + + /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */ + t = p1 + (p2 >> 32); + t += (t >> 32); + t += (u32)t > 0xfffffffeu; + p1 += (t >> 32); + p2 += (p1 << 32); + + /* compute (p1+k1)%p64 and (p2+k2)%p64 */ + p1 += k1; + p1 += (0 - (p1 < k1)) & 257; + p2 += k2; + p2 += (0 - (p2 < k2)) & 257; + + /* compute (p1+k1)*(p2+k2)%p64 */ + MUL64(rh, rl, p1, p2); + t = rh >> 56; + ADD128(t, rl, z, rh); + rh <<= 8; + ADD128(t, rl, z, rh); + t += t << 8; + rl += t; + rl += (0 - (rl < t)) & 257; + rl += (0 - (rl > p64-1)) & 257; + return rl; +} + +static void vhash_update(const unsigned char *m, + unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ + struct vmac_ctx *ctx) +{ + u64 rh, rl, *mptr; + const u64 *kptr = (u64 *)ctx->nhkey; + int i; + u64 ch, cl; + u64 pkh = ctx->polykey[0]; + u64 pkl = ctx->polykey[1]; + + mptr = (u64 *)m; + i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ + + ch = ctx->polytmp[0]; + cl = ctx->polytmp[1]; + + if (!ctx->first_block_processed) { + ctx->first_block_processed = 1; + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); + rh &= m62; + ADD128(ch, cl, rh, rl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + i--; + } + + while (i--) { + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); + rh &= m62; + poly_step(ch, cl, pkh, pkl, rh, rl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + } + + ctx->polytmp[0] = ch; + ctx->polytmp[1] = cl; +} + +static u64 vhash(unsigned char m[], unsigned int mbytes, + u64 *tagl, struct vmac_ctx *ctx) +{ + u64 rh, rl, *mptr; + const u64 *kptr = (u64 *)ctx->nhkey; + int i, remaining; + u64 ch, cl; + u64 pkh = ctx->polykey[0]; + u64 pkl = ctx->polykey[1]; + + mptr = (u64 *)m; + i = mbytes / VMAC_NHBYTES; + remaining = mbytes % VMAC_NHBYTES; + + if (ctx->first_block_processed) { + ch = ctx->polytmp[0]; + cl = ctx->polytmp[1]; + } else if (i) { + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); + ch &= m62; + ADD128(ch, cl, pkh, pkl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + i--; + } else if (remaining) { + nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); + ch &= m62; + ADD128(ch, cl, pkh, pkl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + goto do_l3; + } else {/* Empty String */ + ch = pkh; cl = pkl; + goto do_l3; + } + + while (i--) { + nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); + rh &= m62; + poly_step(ch, cl, pkh, pkl, rh, rl); + mptr += (VMAC_NHBYTES/sizeof(u64)); + } + if (remaining) { + nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); + rh &= m62; + poly_step(ch, cl, pkh, pkl, rh, rl); + } + +do_l3: + vhash_abort(ctx); + 
remaining *= 8; + return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); +} + +static u64 vmac(unsigned char m[], unsigned int mbytes, + unsigned char n[16], u64 *tagl, + struct vmac_ctx_t *ctx) +{ + u64 *in_n, *out_p; + u64 p, h; + int i; + + in_n = ctx->__vmac_ctx.cached_nonce; + out_p = ctx->__vmac_ctx.cached_aes; + + i = n[15] & 1; + if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { + in_n[0] = *(u64 *)(n); + in_n[1] = *(u64 *)(n+8); + ((unsigned char *)in_n)[15] &= 0xFE; + crypto_cipher_encrypt_one(ctx->child, + (unsigned char *)out_p, (unsigned char *)in_n); + + ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); + } + p = be64_to_cpup(out_p + i); + h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); + return p + h; +} + +static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) +{ + u64 in[2] = {0}, out[2]; + unsigned i; + int err = 0; + + err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); + if (err) + return err; + + /* Fill nh key */ + ((unsigned char *)in)[0] = 0x80; + for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { + crypto_cipher_encrypt_one(ctx->child, + (unsigned char *)out, (unsigned char *)in); + ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); + ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); + ((unsigned char *)in)[15] += 1; + } + + /* Fill poly key */ + ((unsigned char *)in)[0] = 0xC0; + in[1] = 0; + for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { + crypto_cipher_encrypt_one(ctx->child, + (unsigned char *)out, (unsigned char *)in); + ctx->__vmac_ctx.polytmp[i] = + ctx->__vmac_ctx.polykey[i] = + be64_to_cpup(out) & mpoly; + ctx->__vmac_ctx.polytmp[i+1] = + ctx->__vmac_ctx.polykey[i+1] = + be64_to_cpup(out+1) & mpoly; + ((unsigned char *)in)[15] += 1; + } + + /* Fill ip key */ + ((unsigned char *)in)[0] = 0xE0; + in[1] = 0; + for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { + do { + crypto_cipher_encrypt_one(ctx->child, + (unsigned char *)out, (unsigned char *)in); + ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); + ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); + ((unsigned char *)in)[15] += 1; + } while (ctx->__vmac_ctx.l3key[i] >= p64 + || ctx->__vmac_ctx.l3key[i+1] >= p64); + } + + /* Invalidate nonce/aes cache and reset other elements */ + ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ + ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ + ctx->__vmac_ctx.first_block_processed = 0; + + return err; +} + +static int vmac_setkey(struct crypto_shash *parent, + const u8 *key, unsigned int keylen) +{ + struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + + if (keylen != VMAC_KEY_LEN) { + crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return vmac_set_key((u8 *)key, ctx); +} + +static int vmac_init(struct shash_desc *pdesc) +{ + struct crypto_shash *parent = pdesc->tfm; + struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + + memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); + return 0; +} + +static int vmac_update(struct shash_desc *pdesc, const u8 *p, + unsigned int len) +{ + struct crypto_shash *parent = pdesc->tfm; + struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + + vhash_update(p, len, &ctx->__vmac_ctx); + + return 0; +} + +static int vmac_final(struct shash_desc *pdesc, u8 *out) +{ + struct crypto_shash *parent = pdesc->tfm; + struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + vmac_t mac; + u8 nonce[16] = {}; + + mac = vmac(NULL, 0, nonce, NULL, ctx); + memcpy(out, &mac, sizeof(vmac_t)); + 
memset(&mac, 0, sizeof(vmac_t)); + memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); + return 0; +} + +static int vmac_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_cipher *cipher; + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + return 0; +} + +static void vmac_exit_tfm(struct crypto_tfm *tfm) +{ + struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); + crypto_free_cipher(ctx->child); +} + +static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct shash_instance *inst; + struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); + if (err) + return err; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + inst = shash_alloc_instance("vmac", alg); + err = PTR_ERR(inst); + if (IS_ERR(inst)) + goto out_put_alg; + + err = crypto_init_spawn(shash_instance_ctx(inst), alg, + shash_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + if (err) + goto out_free_inst; + + inst->alg.base.cra_priority = alg->cra_priority; + inst->alg.base.cra_blocksize = alg->cra_blocksize; + inst->alg.base.cra_alignmask = alg->cra_alignmask; + + inst->alg.digestsize = sizeof(vmac_t); + inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); + inst->alg.base.cra_init = vmac_init_tfm; + inst->alg.base.cra_exit = vmac_exit_tfm; + + inst->alg.init = vmac_init; + inst->alg.update = vmac_update; + inst->alg.final = vmac_final; + inst->alg.setkey = vmac_setkey; + + err = shash_register_instance(tmpl, inst); + if (err) { +out_free_inst: + shash_free_instance(shash_crypto_instance(inst)); + } + +out_put_alg: + crypto_mod_put(alg); + return err; +} + +static struct crypto_template vmac_tmpl = { + .name = "vmac", + .create = vmac_create, + .free = shash_free_instance, + .module = THIS_MODULE, +}; + +static int __init vmac_module_init(void) +{ + return crypto_register_template(&vmac_tmpl); +} + +static void __exit vmac_module_exit(void) +{ + crypto_unregister_template(&vmac_tmpl); +} + +module_init(vmac_module_init); +module_exit(vmac_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VMAC hash algorithm"); + diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h new file mode 100644 index 000000000000..c4467c55df1e --- /dev/null +++ b/include/crypto/vmac.h @@ -0,0 +1,61 @@ +/* + * Modified to interface to the Linux kernel + * Copyright (c) 2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef __CRYPTO_VMAC_H +#define __CRYPTO_VMAC_H + +/* -------------------------------------------------------------------------- + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. 
+ * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Please send bug reports to the authors. + * Last modified: 17 APR 08, 1700 PDT + * ----------------------------------------------------------------------- */ + +/* + * User definable settings. + */ +#define VMAC_TAG_LEN 64 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ + +/* + * This implementation uses u32 and u64 as names for unsigned 32- + * and 64-bit integer types. These are defined in C99 stdint.h. The + * following may need adaptation if you are not running a C99 or + * Microsoft C environment. + */ +struct vmac_ctx { + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; + u64 polykey[2*VMAC_TAG_LEN/64]; + u64 l3key[2*VMAC_TAG_LEN/64]; + u64 polytmp[2*VMAC_TAG_LEN/64]; + u64 cached_nonce[2]; + u64 cached_aes[2]; + int first_block_processed; +}; + +typedef u64 vmac_t; + +struct vmac_ctx_t { + struct crypto_cipher *child; + struct vmac_ctx __vmac_ctx; +}; + +#endif /* __CRYPTO_VMAC_H */ -- cgit v1.2.3
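For reference, a hedged sketch of driving the new "vmac(aes)" instance through the shash API (editor's illustration, not part of the commit; assumes CONFIG_CRYPTO_VMAC and an AES cipher are available, and trims error reporting). Note the key is VMAC_KEY_LEN (16) bytes, the tag is VMAC_TAG_LEN/8 (8) bytes, and the message length is kept at a multiple of VMAC_NHBYTES (128), as the update path expects:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int vmac_demo(void)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	static const u8 key[16] = { 0x00, 0x01, 0x02, 0x03 };	/* arbitrary demo key */
	u8 msg[128] = { 0 };	/* arbitrary demo message, one VMAC_NHBYTES block */
	u8 tag[8];
	int err;

	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	/* shash descriptors carry per-request state after the tfm. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	err = crypto_shash_digest(desc, msg, sizeof(msg), tag);
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}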