Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig | 77
-rw-r--r--  crypto/Makefile | 10
-rw-r--r--  crypto/ablkcipher.c | 310
-rw-r--r--  crypto/aead.c | 5
-rw-r--r--  crypto/aes_generic.c | 13
-rw-r--r--  crypto/ahash.c | 344
-rw-r--r--  crypto/algapi.c | 196
-rw-r--r--  crypto/algboss.c | 10
-rw-r--r--  crypto/ansi_cprng.c | 117
-rw-r--r--  crypto/anubis.c | 22
-rw-r--r--  crypto/api.c | 67
-rw-r--r--  crypto/async_tx/Kconfig | 15
-rw-r--r--  crypto/async_tx/Makefile | 3
-rw-r--r--  crypto/async_tx/async_memcpy.c | 46
-rw-r--r--  crypto/async_tx/async_memset.c | 43
-rw-r--r--  crypto/async_tx/async_pq.c | 416
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 505
-rw-r--r--  crypto/async_tx/async_tx.c | 125
-rw-r--r--  crypto/async_tx/async_xor.c | 238
-rw-r--r--  crypto/async_tx/raid6test.c | 248
-rw-r--r--  crypto/authenc.c | 383
-rw-r--r--  crypto/blkcipher.c | 2
-rw-r--r--  crypto/blowfish.c | 18
-rw-r--r--  crypto/camellia.c | 616
-rw-r--r--  crypto/cast5.c | 14
-rw-r--r--  crypto/cast6.c | 122
-rw-r--r--  crypto/cipher.c | 2
-rw-r--r--  crypto/compress.c | 4
-rw-r--r--  crypto/crc32c.c | 6
-rw-r--r--  crypto/cryptd.c | 536
-rw-r--r--  crypto/crypto_null.c | 8
-rw-r--r--  crypto/ctr.c | 4
-rw-r--r--  crypto/deflate.c | 20
-rw-r--r--  crypto/des_generic.c | 133
-rw-r--r--  crypto/digest.c | 240
-rw-r--r--  crypto/ecb.c | 2
-rw-r--r--  crypto/fcrypt.c | 6
-rw-r--r--  crypto/gcm.c | 908
-rw-r--r--  crypto/ghash-generic.c | 170
-rw-r--r--  crypto/hash.c | 183
-rw-r--r--  crypto/hmac.c | 303
-rw-r--r--  crypto/internal.h | 30
-rw-r--r--  crypto/md5.c | 41
-rw-r--r--  crypto/pcompress.c | 6
-rw-r--r--  crypto/pcrypt.c | 566
-rw-r--r--  crypto/proc.c | 19
-rw-r--r--  crypto/rng.c | 3
-rw-r--r--  crypto/scatterwalk.c | 2
-rw-r--r--  crypto/seqiv.c | 1
-rw-r--r--  crypto/sha1_generic.c | 41
-rw-r--r--  crypto/sha256_generic.c | 100
-rw-r--r--  crypto/sha512_generic.c | 48
-rw-r--r--  crypto/shash.c | 272
-rw-r--r--  crypto/tcrypt.c | 367
-rw-r--r--  crypto/tcrypt.h | 29
-rw-r--r--  crypto/testmgr.c | 205
-rw-r--r--  crypto/testmgr.h | 87
-rw-r--r--  crypto/twofish_generic.c (renamed from crypto/twofish.c) | 1
-rw-r--r--  crypto/vmac.c | 675
-rw-r--r--  crypto/xcbc.c | 370
-rw-r--r--  crypto/xor.c | 1
-rw-r--r--  crypto/xts.c | 2
62 files changed, 7021 insertions, 2335 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708f..e4bac29a32e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,6 +23,7 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
+ depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
help
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
@@ -78,6 +79,11 @@ config CRYPTO_RNG2
config CRYPTO_PCOMP
tristate
+ select CRYPTO_PCOMP2
+ select CRYPTO_ALGAPI
+
+config CRYPTO_PCOMP2
+ tristate
select CRYPTO_ALGAPI2
config CRYPTO_MANAGER
@@ -92,7 +98,15 @@ config CRYPTO_MANAGER2
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
- select CRYPTO_PCOMP
+ select CRYPTO_PCOMP2
+
+config CRYPTO_MANAGER_DISABLE_TESTS
+ bool "Disable run-time self tests"
+ default y
+ depends on CRYPTO_MANAGER2
+ help
+ Disable run-time self tests that normally take place at
+ algorithm registration.
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
@@ -112,6 +126,16 @@ config CRYPTO_NULL
help
These are 'Null' algorithms, used by IPsec, which do nothing.
+config CRYPTO_PCRYPT
+ tristate "Parallel crypto engine (EXPERIMENTAL)"
+ depends on SMP && EXPERIMENTAL
+ select PADATA
+ select CRYPTO_MANAGER
+ select CRYPTO_AEAD
+ help
+ This converts an arbitrary crypto algorithm into a parallel
+ algorithm that executes in kernel threads.
+
config CRYPTO_WORKQUEUE
tristate
@@ -156,7 +180,7 @@ config CRYPTO_GCM
tristate "GCM/GMAC support"
select CRYPTO_CTR
select CRYPTO_AEAD
- select CRYPTO_GF128MUL
+ select CRYPTO_GHASH
help
Support for Galois/Counter Mode (GCM) and Galois Message
Authentication Code (GMAC). Required for IPSec.
@@ -267,6 +291,18 @@ config CRYPTO_XCBC
http://csrc.nist.gov/encryption/modes/proposedmodes/
xcbc-mac/xcbc-mac-spec.pdf
+config CRYPTO_VMAC
+ tristate "VMAC support"
+ depends on EXPERIMENTAL
+ select CRYPTO_HASH
+ select CRYPTO_MANAGER
+ help
+ VMAC is a message authentication algorithm designed for
+ very high speed on 64-bit architectures.
+
+ See also:
+ <http://fastcrypto.org/vmac>
+
comment "Digest"
config CRYPTO_CRC32C
@@ -289,6 +325,13 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
+config CRYPTO_GHASH
+ tristate "GHASH digest algorithm"
+ select CRYPTO_SHASH
+ select CRYPTO_GF128MUL
+ help
+ GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+
config CRYPTO_MD4
tristate "MD4 digest algorithm"
select CRYPTO_HASH
@@ -321,7 +364,7 @@ config CRYPTO_RMD128
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
@@ -338,7 +381,7 @@ config CRYPTO_RMD160
against RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
@@ -350,7 +393,7 @@ config CRYPTO_RMD256
(than RIPEMD-128).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
@@ -362,7 +405,7 @@ config CRYPTO_RMD320
(than RIPEMD-160).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
@@ -417,7 +460,16 @@ config CRYPTO_WP512
Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
See also:
- <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+ <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
+
+config CRYPTO_GHASH_CLMUL_NI_INTEL
+ tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
+ depends on (X86 || UML_X86) && 64BIT
+ select CRYPTO_SHASH
+ select CRYPTO_CRYPTD
+ help
+ GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+ The implementation is accelerated by CLMUL-NI of Intel.
comment "Ciphers"
@@ -526,8 +578,8 @@ config CRYPTO_ANUBIS
in the NESSIE competition.
See also:
- <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/>
- <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html>
+ <https://www.cosic.esat.kuleuven.be/nessie/reports/>
+ <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
@@ -606,7 +658,7 @@ config CRYPTO_KHAZAD
on 32-bit processors. Khazad uses an 128 bit key size.
See also:
- <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html>
+ <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
config CRYPTO_SALSA20
tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
@@ -780,13 +832,14 @@ comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
+ default m
select CRYPTO_AES
select CRYPTO_RNG
- select CRYPTO_FIPS
help
This option enables the generic pseudo random number generator
for cryptographic modules. Uses the Algorithm specified in
- ANSI X9.31 A.2.4
+ ANSI X9.31 A.2.4. Note that this option must be enabled if
+ CRYPTO_FIPS is selected
source "drivers/crypto/Kconfig"
diff --git a/crypto/Makefile b/crypto/Makefile
index 673d9f7c1bda..423b7de61f93 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o digest.o compress.o
+crypto-objs := api.o cipher.o compress.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -22,17 +22,17 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
-crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
-obj-$(CONFIG_CRYPTO_PCOMP) += pcompress.o
+obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -56,11 +56,12 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
-obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c7104..a854df2a5a4b 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -1,6 +1,6 @@
/*
* Asynchronous block chaining cipher operations.
- *
+ *
* This is the asynchronous version of blkcipher.c indicating completion
* via a callback.
*
@@ -8,12 +8,13 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -23,8 +24,287 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <crypto/scatterwalk.h>
+
#include "internal.h"
+static const char *skcipher_default_geniv __read_mostly;
+
+struct ablkcipher_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ void *data;
+};
+
+enum {
+ ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+ scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+ struct ablkcipher_buffer *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+ ablkcipher_buffer_write(p);
+ list_del(&p->entry);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+ struct ablkcipher_buffer *p)
+{
+ p->dst = walk->out;
+ list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+ return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int bsize)
+{
+ unsigned int n = bsize;
+
+ for (;;) {
+ unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+ if (len_this_page > n)
+ len_this_page = n;
+ scatterwalk_advance(&walk->out, n);
+ if (n == len_this_page)
+ break;
+ n -= len_this_page;
+ scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+ }
+
+ return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
+{
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+
+ return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int nbytes = 0;
+
+ if (likely(err >= 0)) {
+ unsigned int n = walk->nbytes - err;
+
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+ n = ablkcipher_done_fast(walk, n);
+ else if (WARN_ON(err)) {
+ err = -EINVAL;
+ goto err;
+ } else
+ n = ablkcipher_done_slow(walk, n);
+
+ nbytes = walk->total - n;
+ err = 0;
+ }
+
+ scatterwalk_done(&walk->in, 0, nbytes);
+ scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+ walk->total = nbytes;
+ walk->nbytes = nbytes;
+
+ if (nbytes) {
+ crypto_yield(req->base.flags);
+ return ablkcipher_walk_next(req, walk);
+ }
+
+ if (walk->iv != req->info)
+ memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+ if (walk->iv_buffer)
+ kfree(walk->iv_buffer);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk,
+ unsigned int bsize,
+ unsigned int alignmask,
+ void **src_p, void **dst_p)
+{
+ unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+ struct ablkcipher_buffer *p;
+ void *src, *dst, *base;
+ unsigned int n;
+
+ n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+ n += (aligned_bsize * 3 - (alignmask + 1) +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+ p = kmalloc(n, GFP_ATOMIC);
+ if (!p)
+ return ablkcipher_walk_done(req, walk, -ENOMEM);
+
+ base = p + 1;
+
+ dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+ src = dst = ablkcipher_get_spot(dst, bsize);
+
+ p->len = bsize;
+ p->data = dst;
+
+ scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+ ablkcipher_queue_write(walk, p);
+
+ walk->nbytes = bsize;
+ walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+ *src_p = src;
+ *dst_p = dst;
+
+ return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+ struct crypto_tfm *tfm,
+ unsigned int alignmask)
+{
+ unsigned bs = walk->blocksize;
+ unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+ unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+ unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+ (alignmask + 1);
+ u8 *iv;
+
+ size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+ if (!walk->iv_buffer)
+ return -ENOMEM;
+
+ iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, ivsize);
+
+ walk->iv = memcpy(iv, walk->iv, ivsize);
+ return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->src.page = scatterwalk_page(&walk->in);
+ walk->src.offset = offset_in_page(walk->in.offset);
+ walk->dst.page = scatterwalk_page(&walk->out);
+ walk->dst.offset = offset_in_page(walk->out.offset);
+
+ return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask, bsize, n;
+ void *src, *dst;
+ int err;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ n = walk->total;
+ if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+ req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+ return ablkcipher_walk_done(req, walk, -EINVAL);
+ }
+
+ walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+ src = dst = NULL;
+
+ bsize = min(walk->blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (n < bsize ||
+ !scatterwalk_aligned(&walk->in, alignmask) ||
+ !scatterwalk_aligned(&walk->out, alignmask)) {
+ err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+ &src, &dst);
+ goto set_phys_lowmem;
+ }
+
+ walk->nbytes = n;
+
+ return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+ if (err >= 0) {
+ walk->src.page = virt_to_page(src);
+ walk->dst.page = virt_to_page(dst);
+ walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+ walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->iv_buffer = NULL;
+ walk->iv = req->info;
+ if (unlikely(((unsigned long)walk->iv & alignmask))) {
+ int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+ if (err)
+ return err;
+ }
+
+ scatterwalk_start(&walk->in, walk->in.sg);
+ scatterwalk_start(&walk->out, walk->out.sg);
+
+ return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+ return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -180,7 +460,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
- return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+ if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+ alg->cra_ablkcipher.ivsize) !=
+ alg->cra_blocksize)
+ return "chainiv";
+
+ return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +488,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
int err;
larval = crypto_larval_lookup(alg->cra_driver_name,
+ (type & ~CRYPTO_ALG_TYPE_MASK) |
CRYPTO_ALG_TYPE_GIVCIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ mask | CRYPTO_ALG_TYPE_MASK);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto out;
@@ -360,3 +648,17 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+ skcipher_default_geniv = num_possible_cpus() > 1 ?
+ "eseqiv" : "chainiv";
+ return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
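
For orientation, here is a minimal sketch of how a driver's encrypt path might drive the new walker exported above; it is illustrative only (not part of the patch) and assumes the companion ablkcipher_walk_init() helper declared in <crypto/algapi.h> alongside these functions.

/*
 * Illustrative only -- a hardware driver would walk the request in
 * page-bounded, block-aligned chunks roughly like this.
 */
#include <crypto/algapi.h>

static int example_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int block = crypto_ablkcipher_blocksize(tfm);
	struct ablkcipher_walk walk;
	int err;

	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, &walk);

	while (walk.nbytes) {
		/* Consume a whole number of cipher blocks per segment. */
		unsigned int n = walk.nbytes & ~(block - 1);

		/*
		 * A real driver programs its engine here, reading from
		 * walk.src.page/walk.src.offset and writing to
		 * walk.dst.page/walk.dst.offset.
		 */

		err = ablkcipher_walk_done(req, &walk, walk.nbytes - n);
		if (err < 0)
			break;
	}

	/* Flush any bounce buffers queued for misaligned segments. */
	__ablkcipher_walk_complete(&walk);
	return err;
}
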
diff --git a/crypto/aead.c b/crypto/aead.c
index d9aa733db164..6729e8ff68e7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -1,13 +1,13 @@
/*
* AEAD: Authenticated Encryption with Associated Data
- *
+ *
* This file provides API support for AEAD algorithms.
*
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index b8b66ec3883b..a68c73dae15a 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1,4 +1,4 @@
-/*
+/*
* Cryptographic API.
*
* AES Cipher Algorithm.
@@ -1127,7 +1127,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
-#define imix_col(y,x) do { \
+#define imix_col(y, x) do { \
u = star_x(x); \
v = star_x(u); \
w = star_x(v); \
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[6 * i + 11] = t; \
} while (0)
-#define loop8(i) do { \
+#define loop8tophalf(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[8 * i]; \
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[8 * i + 10] = t; \
t ^= ctx->key_enc[8 * i + 3]; \
ctx->key_enc[8 * i + 11] = t; \
+} while (0)
+
+#define loop8(i) do { \
+ loop8tophalf(i); \
t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
ctx->key_enc[8 * i + 12] = t; \
t ^= ctx->key_enc[8 * i + 5]; \
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
ctx->key_enc[5] = le32_to_cpu(key[5]);
ctx->key_enc[6] = le32_to_cpu(key[6]);
t = ctx->key_enc[7] = le32_to_cpu(key[7]);
- for (i = 0; i < 7; ++i)
+ for (i = 0; i < 6; ++i)
loop8(i);
+ loop8tophalf(i);
break;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f3476374f764..f669822a7a44 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -24,6 +24,19 @@
#include "internal.h"
+struct ahash_request_priv {
+ crypto_completion_t complete;
+ void *data;
+ u8 *result;
+ void *ubuf[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
@@ -34,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
walk->data = crypto_kmap(walk->pg, 0);
walk->data += offset;
- if (offset & alignmask)
- nbytes = alignmask + 1 - (offset & alignmask);
+ if (offset & alignmask) {
+ unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+ if (nbytes > unaligned)
+ nbytes = unaligned;
+ }
walk->entrylen -= nbytes;
return nbytes;
@@ -65,7 +81,6 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
walk->data -= walk->offset;
if (nbytes && walk->offset & alignmask && !err) {
- walk->offset += alignmask - 1;
walk->offset = ALIGN(walk->offset, alignmask + 1);
walk->data += walk->offset;
@@ -132,36 +147,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
+ buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
- ret = ahash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ ret = tfm->setkey(tfm, alignbuffer, keylen);
+ kzfree(buffer);
return ret;
}
-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
- return ahash->setkey(tfm, key, keylen);
+ return tfm->setkey(tfm, key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -169,44 +182,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
return -ENOSYS;
}
-int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+ unsigned long mask)
+{
+ return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+
+static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+ kzfree(priv);
+}
+
+static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_op_unaligned_finish(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_op_unaligned(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+ int err;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
- memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
- if (alg->reinit)
- alg->reinit(req);
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_op_unaligned_done;
+ req->base.data = req;
+ req->priv = priv;
- return 0;
+ err = op(req);
+ ahash_op_unaligned_finish(req, err);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_import);
-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
+static int crypto_ahash_op(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
- return alg->cra_ctxsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+ if ((unsigned long)req->result & alignmask)
+ return ahash_op_unaligned(req, op);
+
+ return op(req);
}
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+int crypto_ahash_final(struct ahash_request *req)
{
- struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_final);
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
+int crypto_ahash_finup(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+
+int crypto_ahash_digest(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_digest);
+
+static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+ kzfree(priv);
+}
+
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_def_finup_finish2(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+{
+ if (err)
+ goto out;
+
+ req->base.complete = ahash_def_finup_done2;
+ req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_ahash_reqtfm(req)->final(req);
+
+out:
+ ahash_def_finup_finish2(req, err);
+ return err;
+}
+
+static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ err = ahash_def_finup_finish1(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
- crt->digestsize = alg->digestsize;
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
+
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_def_finup_done1;
+ req->base.data = req;
+ req->priv = priv;
+
+ return ahash_def_finup_finish1(req, tfm->update(req));
+}
+
+static int ahash_no_export(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_no_import(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+ hash->setkey = ahash_nosetkey;
+ hash->export = ahash_no_export;
+ hash->import = ahash_no_import;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_init_shash_ops_async(tfm);
+
+ hash->init = alg->init;
+ hash->update = alg->update;
+ hash->final = alg->final;
+ hash->finup = alg->finup ?: ahash_def_finup;
+ hash->digest = alg->digest;
+
+ if (alg->setkey)
+ hash->setkey = alg->setkey;
+ if (alg->export)
+ hash->export = alg->export;
+ if (alg->import)
+ hash->import = alg->import;
return 0;
}
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type == &crypto_ahash_type)
+ return alg->cra_ctxsize;
+
+ return sizeof(struct crypto_shash *);
+}
+
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +405,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
+ seq_printf(m, "digestsize : %u\n",
+ __crypto_hash_alg_common(alg)->digestsize);
}
const struct crypto_type crypto_ahash_type = {
- .ctxsize = crypto_ahash_ctxsize,
- .init = crypto_init_ahash_ops,
+ .extsize = crypto_ahash_extsize,
+ .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
+
+static int ahash_prepare_alg(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+ alg->halg.statesize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ base->cra_type = &crypto_ahash_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+
+ return 0;
+}
+
+int crypto_register_ahash(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ err = ahash_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahash);
+
+int crypto_unregister_ahash(struct ahash_alg *alg)
+{
+ return crypto_unregister_alg(&alg->halg.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst)
+{
+ int err;
+
+ err = ahash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_register_instance);
+
+void ahash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(ahash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_free_instance);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ struct hash_alg_common *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_ahash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
+}
+EXPORT_SYMBOL_GPL(ahash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
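
For orientation, a minimal sketch of a caller using the ahash interface exported above (crypto_alloc_ahash() plus crypto_ahash_digest()); the helper name and the choice of "sha256" are illustrative, not part of the patch.

/* Illustrative only -- one-shot digest over a buffer in the linear mapping. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	/* Mask out CRYPTO_ALG_ASYNC so the request completes synchronously. */
	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_ahash_op() above bounces 'out' if it is badly aligned. */
	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
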
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 56c62e2858d5..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -17,6 +17,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
@@ -81,16 +82,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
crypto_tmpl_put(tmpl);
}
+static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
+ struct list_head *stack,
+ struct list_head *top,
+ struct list_head *secondary_spawns)
+{
+ struct crypto_spawn *spawn, *n;
+
+ if (list_empty(stack))
+ return NULL;
+
+ spawn = list_first_entry(stack, struct crypto_spawn, list);
+ n = list_entry(spawn->list.next, struct crypto_spawn, list);
+
+ if (spawn->alg && &n->list != stack && !n->alg)
+ n->alg = (n->list.next == stack) ? alg :
+ &list_entry(n->list.next, struct crypto_spawn,
+ list)->inst->alg;
+
+ list_move(&spawn->list, secondary_spawns);
+
+ return &n->list == stack ? top : &n->inst->alg.cra_users;
+}
+
static void crypto_remove_spawn(struct crypto_spawn *spawn,
- struct list_head *list,
- struct list_head *secondary_spawns)
+ struct list_head *list)
{
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
- list_del_init(&spawn->list);
- spawn->alg = NULL;
-
if (crypto_is_dead(&inst->alg))
return;
@@ -106,25 +126,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
- list_splice(&inst->alg.cra_users, secondary_spawns);
+ BUG_ON(!list_empty(&inst->alg.cra_users));
}
-static void crypto_remove_spawns(struct list_head *spawns,
- struct list_head *list, u32 new_type)
+static void crypto_remove_spawns(struct crypto_alg *alg,
+ struct list_head *list,
+ struct crypto_alg *nalg)
{
+ u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
+ struct list_head *spawns;
+ LIST_HEAD(stack);
+ LIST_HEAD(top);
+ spawns = &alg->cra_users;
list_for_each_entry_safe(spawn, n, spawns, list) {
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ list_move(&spawn->list, &top);
}
- while (!list_empty(&secondary_spawns)) {
- list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ spawns = &top;
+ do {
+ while (!list_empty(spawns)) {
+ struct crypto_instance *inst;
+
+ spawn = list_first_entry(spawns, struct crypto_spawn,
+ list);
+ inst = spawn->inst;
+
+ BUG_ON(&inst->alg == alg);
+
+ list_move(&spawn->list, &stack);
+
+ if (&inst->alg == nalg)
+ break;
+
+ spawn->alg = NULL;
+ spawns = &inst->alg.cra_users;
+ }
+ } while ((spawns = crypto_more_spawns(alg, &stack, &top,
+ &secondary_spawns)));
+
+ list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
+ if (spawn->alg)
+ list_move(&spawn->list, &spawn->alg->cra_users);
+ else
+ crypto_remove_spawn(spawn, list);
}
}
@@ -181,7 +231,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
list_add(&alg->cra_list, &crypto_alg_list);
list_add(&larval->alg.cra_list, &crypto_alg_list);
-out:
+out:
return larval;
free_larval:
@@ -258,7 +308,7 @@ found:
q->cra_priority > alg->cra_priority)
continue;
- crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
+ crypto_remove_spawns(q, &list, alg);
}
complete:
@@ -330,7 +380,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
- crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
+ crypto_remove_spawns(alg, list, NULL);
return 0;
}
@@ -339,7 +389,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
-
+
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
@@ -488,20 +538,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst,
+ const struct crypto_type *frontend)
+{
+ int err = -EINVAL;
+
+ if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
+ goto out;
+
+ spawn->frontend = frontend;
+ err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_init_spawn2);
+
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
+ if (!spawn->alg)
+ return;
+
down_write(&crypto_alg_sem);
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
- u32 mask)
+static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
- struct crypto_tfm *tfm;
down_read(&crypto_alg_sem);
alg = spawn->alg;
@@ -516,6 +584,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
return ERR_PTR(-EAGAIN);
}
+ return alg;
+}
+
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
@@ -532,6 +613,27 @@ out_put_alg:
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ tfm = crypto_create_tfm(alg, spawn->frontend);
+ if (IS_ERR(tfm))
+ goto out_put_alg;
+
+ return tfm;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +697,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
{
const char *name;
int err;
@@ -605,9 +709,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
if (IS_ERR(name))
return ERR_PTR(err);
- return crypto_alg_mod_lookup(name, type, mask);
+ return crypto_find_alg(name, frontend, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_attr_alg);
+EXPORT_SYMBOL_GPL(crypto_attr_alg2);
int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
@@ -627,17 +731,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);
-struct crypto_instance *crypto_alloc_instance(const char *name,
- struct crypto_alg *alg)
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+ unsigned int head)
{
struct crypto_instance *inst;
- struct crypto_spawn *spawn;
+ char *p;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
+ p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
+ GFP_KERNEL);
+ if (!p)
return ERR_PTR(-ENOMEM);
+ inst = (void *)(p + head);
+
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +754,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ return p;
+
+err_free_inst:
+ kfree(p);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
+
+struct crypto_instance *crypto_alloc_instance(const char *name,
+ struct crypto_alg *alg)
+{
+ struct crypto_instance *inst;
+ struct crypto_spawn *spawn;
+ int err;
+
+ inst = crypto_alloc_instance2(name, alg, 0);
+ if (IS_ERR(inst))
+ goto out;
+
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
@@ -658,7 +784,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
err_free_inst:
kfree(inst);
- return ERR_PTR(err);
+ inst = ERR_PTR(err);
+
+out:
+ return inst;
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
@@ -692,7 +821,7 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
{
struct list_head *request;
@@ -707,7 +836,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
request = queue->list.next;
list_del(request);
- return list_entry(request, struct crypto_async_request, list);
+ return (char *)list_entry(request, struct crypto_async_request, list) -
+ offset;
+}
+EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
+
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+{
+ return __crypto_dequeue_request(queue, 0);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 9908dd830c26..791d194958fa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -19,6 +19,7 @@
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
@@ -68,6 +69,11 @@ static int cryptomgr_probe(void *data)
goto err;
do {
+ if (tmpl->create) {
+ err = tmpl->create(tmpl, param->tb);
+ continue;
+ }
+
inst = tmpl->alloc(param->tb);
if (IS_ERR(inst))
err = PTR_ERR(inst);
@@ -206,6 +212,10 @@ static int cryptomgr_test(void *data)
u32 type = param->type;
int err = 0;
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ goto skiptest;
+#endif
+
if (type & CRYPTO_ALG_TESTED)
goto skiptest;
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index d80ed4c1e009..2bc332142849 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -85,7 +85,7 @@ static void xor_vectors(unsigned char *in1, unsigned char *in2,
* Returns DEFAULT_BLK_SZ bytes of random data per call
* returns 0 if generation succeded, <0 if something went wrong
*/
-static int _get_more_prng_bytes(struct prng_context *ctx)
+static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
int i;
unsigned char tmp[DEFAULT_BLK_SZ];
@@ -132,7 +132,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data,
DEFAULT_BLK_SZ)) {
- if (fips_enabled) {
+ if (cont_test) {
panic("cprng %p Failed repetition check!\n",
ctx);
}
@@ -185,18 +185,15 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
}
/* Our exported functions */
-static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
+static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
+ int do_cont_test)
{
- unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
- if (nbytes < 0)
- return -EINVAL;
-
- spin_lock_irqsave(&ctx->prng_lock, flags);
+ spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
@@ -221,7 +218,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -248,7 +245,7 @@ empty_rbuf:
*/
for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
- if (_get_more_prng_bytes(ctx) < 0) {
+ if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
@@ -268,7 +265,7 @@ empty_rbuf:
goto remainder;
done:
- spin_unlock_irqrestore(&ctx->prng_lock, flags);
+ spin_unlock_bh(&ctx->prng_lock);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
err, ctx);
return err;
@@ -284,10 +281,9 @@ static int reset_prng_context(struct prng_context *ctx,
unsigned char *V, unsigned char *DT)
{
int ret;
- int rc = -EINVAL;
unsigned char *prng_key;
- spin_lock(&ctx->prng_lock);
+ spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
@@ -308,34 +304,20 @@ static int reset_prng_context(struct prng_context *ctx,
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
- if (ctx->tfm)
- crypto_free_cipher(ctx->tfm);
-
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- ctx->tfm = NULL;
- goto out;
- }
-
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_cipher_get_flags(ctx->tfm));
- crypto_free_cipher(ctx->tfm);
goto out;
}
- rc = 0;
+ ret = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
- spin_unlock(&ctx->prng_lock);
-
- return rc;
-
+ spin_unlock_bh(&ctx->prng_lock);
+ return ret;
}
static int cprng_init(struct crypto_tfm *tfm)
@@ -343,6 +325,12 @@ static int cprng_init(struct crypto_tfm *tfm)
struct prng_context *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
+ ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(ctx->tfm)) {
+ dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
+ ctx);
+ return PTR_ERR(ctx->tfm);
+ }
if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
return -EINVAL;
@@ -366,7 +354,7 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
{
struct prng_context *prng = crypto_rng_ctx(tfm);
- return get_prng_bytes(rdata, dlen, prng);
+ return get_prng_bytes(rdata, dlen, prng, 0);
}
/*
@@ -414,26 +402,79 @@ static struct crypto_alg rng_alg = {
}
};
+#ifdef CONFIG_CRYPTO_FIPS
+static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen)
+{
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ return get_prng_bytes(rdata, dlen, prng, 1);
+}
+
+static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+ u8 rdata[DEFAULT_BLK_SZ];
+ int rc;
+
+ struct prng_context *prng = crypto_rng_ctx(tfm);
+
+ rc = cprng_reset(tfm, seed, slen);
+
+ if (!rc)
+ goto out;
+
+ /* this primes our continuity test */
+ rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
+ prng->rand_data_valid = DEFAULT_BLK_SZ;
+
+out:
+ return rc;
+}
+
+static struct crypto_alg fips_rng_alg = {
+ .cra_name = "fips(ansi_cprng)",
+ .cra_driver_name = "fips_ansi_cprng",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_ctxsize = sizeof(struct prng_context),
+ .cra_type = &crypto_rng_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
+ .cra_init = cprng_init,
+ .cra_exit = cprng_exit,
+ .cra_u = {
+ .rng = {
+ .rng_make_random = fips_cprng_get_random,
+ .rng_reset = fips_cprng_reset,
+ .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
+ }
+ }
+};
+#endif
/* Module initalization */
static int __init prng_mod_init(void)
{
- int ret = 0;
+ int rc = 0;
- if (fips_enabled)
- rng_alg.cra_priority += 200;
+ rc = crypto_register_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ if (rc)
+ goto out;
- ret = crypto_register_alg(&rng_alg);
+ rc = crypto_register_alg(&fips_rng_alg);
- if (ret)
- goto out;
out:
- return 0;
+#endif
+ return rc;
}
static void __exit prng_mod_fini(void)
{
crypto_unregister_alg(&rng_alg);
+#ifdef CONFIG_CRYPTO_FIPS
+ crypto_unregister_alg(&fips_rng_alg);
+#endif
return;
}
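
For orientation, the new "fips(ansi_cprng)" instance (like the plain "ansi_cprng") is consumed through the existing RNG API; a hedged sketch, assuming only the standard crypto_rng helpers from <crypto/rng.h>.

/* Illustrative only -- seed the CPRNG and pull some bytes out of it. */
#include <crypto/rng.h>
#include <linux/err.h>

static int example_cprng(u8 *buf, unsigned int len, u8 *seed, unsigned int slen)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("fips(ansi_cprng)", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* slen must match crypto_rng_seedsize(rng): V/key/DT material, 48 bytes here. */
	err = crypto_rng_reset(rng, seed, slen);
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}
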
diff --git a/crypto/anubis.c b/crypto/anubis.c
index e42c3a8ba4aa..77530d571c96 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -469,14 +469,13 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
- switch (key_len)
- {
+ switch (key_len) {
case 16: case 20: case 24: case 28:
case 32: case 36: case 40:
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return - EINVAL;
+ return -EINVAL;
}
ctx->key_len = key_len * 8;
@@ -530,23 +529,24 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
/*
* compute kappa^{r+1} from kappa^r:
*/
- if (r == R) {
+ if (r == R)
break;
- }
for (i = 0; i < N; i++) {
int j = i;
inter[i] = T0[(kappa[j--] >> 24) ];
- if (j < 0) j = N - 1;
+ if (j < 0)
+ j = N - 1;
inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
- if (j < 0) j = N - 1;
+ if (j < 0)
+ j = N - 1;
inter[i] ^= T2[(kappa[j--] >> 8) & 0xff];
- if (j < 0) j = N - 1;
+ if (j < 0)
+ j = N - 1;
inter[i] ^= T3[(kappa[j ] ) & 0xff];
}
kappa[0] = inter[0] ^ rc[r];
- for (i = 1; i < N; i++) {
+ for (i = 1; i < N; i++)
kappa[i] = inter[i];
- }
}
/*
@@ -690,7 +690,7 @@ static struct crypto_alg anubis_alg = {
static int __init anubis_mod_init(void)
{
int ret = 0;
-
+
ret = crypto_register_alg(&anubis_alg);
return ret;
}
diff --git a/crypto/api.c b/crypto/api.c
index d5944f92b416..033a7147e5eb 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -10,7 +10,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -285,21 +285,14 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
- CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_digest_ops_async(tfm);
- else
- return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
-
+
default:
break;
}
-
+
BUG();
return -EINVAL;
}
@@ -318,18 +311,13 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
crypto_exit_compress_ops(tfm);
break;
-
+
default:
BUG();
-
}
}
@@ -349,11 +337,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- len += crypto_digest_ctxsize(alg);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
len += crypto_compress_ctxsize(alg);
break;
@@ -472,7 +456,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
int err = -ENOMEM;
tfmsize = frontend->tfmsize;
- total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend);
+ total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc(total, GFP_KERNEL);
if (mem == NULL)
@@ -481,7 +465,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
- err = frontend->init_tfm(tfm, frontend);
+ err = frontend->init_tfm(tfm);
if (err)
goto out_free_tfm;
@@ -503,6 +487,27 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
+{
+ struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
+ crypto_alg_mod_lookup;
+
+ if (frontend) {
+ type &= frontend->maskclear;
+ mask &= frontend->maskclear;
+ type |= frontend->type;
+ mask |= frontend->maskset;
+
+ if (frontend->lookup)
+ lookup = frontend->lookup;
+ }
+
+ return lookup(alg_name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_find_alg);
+
/*
* crypto_alloc_tfm - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
@@ -526,21 +531,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask)
{
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void *tfm;
int err;
- type &= frontend->maskclear;
- mask &= frontend->maskclear;
- type |= frontend->type;
- mask |= frontend->maskset;
-
- lookup = frontend->lookup ?: crypto_alg_mod_lookup;
-
for (;;) {
struct crypto_alg *alg;
- alg = lookup(alg_name, type, mask);
+ alg = crypto_find_alg(alg_name, frontend, type, mask);
if (IS_ERR(alg)) {
err = PTR_ERR(alg);
goto err;
@@ -595,12 +592,12 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
-
+
if (!IS_ERR(alg)) {
crypto_mod_put(alg);
ret = 1;
}
-
+
return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index d8fb39145986..1b11abbb5c91 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -14,3 +14,18 @@ config ASYNC_MEMSET
tristate
select ASYNC_CORE
+config ASYNC_PQ
+ tristate
+ select ASYNC_CORE
+
+config ASYNC_RAID6_RECOV
+ tristate
+ select ASYNC_CORE
+ select ASYNC_PQ
+ select ASYNC_XOR
+
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+ bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+ bool
diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile
index 27baa7d52fbc..d1e0e6f72bc1 100644
--- a/crypto/async_tx/Makefile
+++ b/crypto/async_tx/Makefile
@@ -2,3 +2,6 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o
obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
obj-$(CONFIG_ASYNC_XOR) += async_xor.o
+obj-$(CONFIG_ASYNC_PQ) += async_pq.o
+obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o
+obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index ddccfb01c416..518c22bd9562 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,31 @@
* async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page
* @src: src page
- * @offset: offset in pages to start transaction
+ * @dest_offset: offset into 'dest' to start transaction
+ * @src_offset: offset into 'src' to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
- * @depend_tx: memcpy depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
- unsigned int src_offset, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ unsigned int src_offset, size_t len,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- if (device) {
+ if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
dma_addr_t dma_dest, dma_src;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ unsigned long dma_prep_flags = 0;
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
@@ -67,42 +70,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
src_buf = kmap_atomic(src, KM_USER1) + src_offset;
memcpy(dest_buf, src_buf, len);
- kunmap_atomic(dest_buf, KM_USER0);
kunmap_atomic(src_buf, KM_USER1);
+ kunmap_atomic(dest_buf, KM_USER0);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
-static int __init async_memcpy_init(void)
-{
- return 0;
-}
-
-static void __exit async_memcpy_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_memcpy_init);
-module_exit(async_memcpy_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 5b5eb99bb244..58e4a8752aee 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,26 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: memset depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ *
+ * honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
- size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_memset(struct page *dest, int val, unsigned int offset, size_t len,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
&dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- if (device) {
+ if (device && is_dma_fill_aligned(device, offset, 0, len)) {
dma_addr_t dma_dest;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ unsigned long dma_prep_flags = 0;
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, offset, len,
DMA_FROM_DEVICE);
@@ -64,38 +64,25 @@ async_memset(struct page *dest, int val, unsigned int offset,
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else { /* run the memset synchronously */
void *dest_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
- dest_buf = (void *) (((char *) page_address(dest)) + offset);
+ dest_buf = page_address(dest) + offset;
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
memset(dest_buf, val, len);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_memset);
-static int __init async_memset_init(void)
-{
- return 0;
-}
-
-static void __exit async_memset_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_memset_init);
-module_exit(async_memset_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memset api");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
new file mode 100644
index 000000000000..fdd8257d35d9
--- /dev/null
+++ b/crypto/async_tx/async_pq.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
+ * Copyright(c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/raid/pq.h>
+#include <linux/async_tx.h>
+#include <linux/gfp.h>
+
+/**
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
+ */
+static struct page *pq_scribble_page;
+
+/* the struct page *blocks[] parameter passed to async_gen_syndrome()
+ * and async_syndrome_val() contains the 'P' destination address at
+ * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
+ *
+ * note: these are macros as they are used as lvalues
+ */
+#define P(b, d) (b[d-2])
+#define Q(b, d) (b[d-1])
+
+/**
+ * do_async_gen_syndrome - asynchronously calculate P and/or Q
+ */
+static __async_inline struct dma_async_tx_descriptor *
+do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
+ const unsigned char *scfs, unsigned int offset, int disks,
+ size_t len, dma_addr_t *dma_src,
+ struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct dma_device *dma = chan->device;
+ enum dma_ctrl_flags dma_flags = 0;
+ enum async_tx_flags flags_orig = submit->flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ dma_async_tx_callback cb_param_orig = submit->cb_param;
+ int src_cnt = disks - 2;
+ unsigned char coefs[src_cnt];
+ unsigned short pq_src_cnt;
+ dma_addr_t dma_dest[2];
+ int src_off = 0;
+ int idx;
+ int i;
+
+ /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
+ if (P(blocks, disks))
+ dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ if (Q(blocks, disks))
+ dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
+ len, DMA_BIDIRECTIONAL);
+ else
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+
+ /* convert source addresses being careful to collapse 'empty'
+ * sources and update the coefficients accordingly
+ */
+ for (i = 0, idx = 0; i < src_cnt; i++) {
+ if (blocks[i] == NULL)
+ continue;
+ dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
+ DMA_TO_DEVICE);
+ coefs[idx] = scfs[i];
+ idx++;
+ }
+ src_cnt = idx;
+
+ while (src_cnt > 0) {
+ submit->flags = flags_orig;
+ pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
+ /* if we are submitting additional pqs, leave the chain open,
+ * clear the callback parameters, and leave the destination
+ * buffers mapped
+ */
+ if (src_cnt > pq_src_cnt) {
+ submit->flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_FENCE;
+ dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
+ submit->cb_fn = NULL;
+ submit->cb_param = NULL;
+ } else {
+ dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
+ if (cb_fn_orig)
+ dma_flags |= DMA_PREP_INTERRUPT;
+ }
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward
+ * progress in case they can not provide a descriptor
+ */
+ for (;;) {
+ tx = dma->device_prep_dma_pq(chan, dma_dest,
+ &dma_src[src_off],
+ pq_src_cnt,
+ &coefs[src_off], len,
+ dma_flags);
+ if (likely(tx))
+ break;
+ async_tx_quiesce(&submit->depend_tx);
+ dma_async_issue_pending(chan);
+ }
+
+ async_tx_submit(chan, tx, submit);
+ submit->depend_tx = tx;
+
+ /* drop completed sources */
+ src_cnt -= pq_src_cnt;
+ src_off += pq_src_cnt;
+
+ dma_flags |= DMA_PREP_CONTINUE;
+ }
+
+ return tx;
+}
+
+/**
+ * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
+ */
+static void
+do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+ size_t len, struct async_submit_ctl *submit)
+{
+ void **srcs;
+ int i;
+
+ if (submit->scribble)
+ srcs = submit->scribble;
+ else
+ srcs = (void **) blocks;
+
+ for (i = 0; i < disks; i++) {
+ if (blocks[i] == NULL) {
+ BUG_ON(i > disks - 3); /* P or Q can't be zero */
+ srcs[i] = (void*)raid6_empty_zero_page;
+ } else
+ srcs[i] = page_address(blocks[i]) + offset;
+ }
+ raid6_call.gen_syndrome(disks, len, srcs);
+ async_tx_sync_epilog(submit);
+}
+
+/**
+ * async_gen_syndrome - asynchronously calculate a raid6 syndrome
+ * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
+ * @offset: common offset into each block (src and dest) to start transaction
+ * @disks: number of blocks (including missing P or Q, see below)
+ * @len: length of operation in bytes
+ * @submit: submission/completion modifiers
+ *
+ * General note: This routine assumes a field of GF(2^8) with a
+ * primitive polynomial of 0x11d and a generator of {02}.
+ *
+ * 'disks' note: callers can optionally omit either P or Q (but not
+ * both) from the calculation by setting blocks[disks-2] or
+ * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
+ * PAGE_SIZE as a temporary buffer of this size is used in the
+ * synchronous path. 'disks' always accounts for both destination
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_empty_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
+ *
+ * 'blocks' note: if submit->scribble is NULL then the contents of
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
+ */
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+ size_t len, struct async_submit_ctl *submit)
+{
+ int src_cnt = disks - 2;
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &P(blocks, disks), 2,
+ blocks, src_cnt, len);
+ struct dma_device *device = chan ? chan->device : NULL;
+ dma_addr_t *dma_src = NULL;
+
+ BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
+
+ if (dma_src && device &&
+ (src_cnt <= dma_maxpq(device, 0) ||
+ dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+ is_dma_pq_aligned(device, offset, 0, len)) {
+ /* run the p+q asynchronously */
+ pr_debug("%s: (async) disks: %d len: %zu\n",
+ __func__, disks, len);
+ return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
+ disks, len, dma_src, submit);
+ }
+
+ /* run the pq synchronously */
+ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&submit->depend_tx);
+
+ if (!P(blocks, disks)) {
+ P(blocks, disks) = pq_scribble_page;
+ BUG_ON(len + offset > PAGE_SIZE);
+ }
+ if (!Q(blocks, disks)) {
+ Q(blocks, disks) = pq_scribble_page;
+ BUG_ON(len + offset > PAGE_SIZE);
+ }
+ do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(async_gen_syndrome);
+
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+ #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+ return NULL;
+ #endif
+ return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+ disks, len);
+}
+
+/**
+ * async_syndrome_val - asynchronously validate a raid6 syndrome
+ * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
+ * @offset: common offset into each block (src and dest) to start transaction
+ * @disks: number of blocks (including missing P or Q, see below)
+ * @len: length of operation in bytes
+ * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
+ * @spare: temporary result buffer for the synchronous case
+ * @submit: submission / completion modifiers
+ *
+ * The same notes from async_gen_syndrome apply to the 'blocks'
+ * and 'disks' parameters of this routine. The synchronous path
+ * requires a temporary result buffer and submit->scribble to be
+ * specified.
+ */
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+ size_t len, enum sum_check_flags *pqres, struct page *spare,
+ struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
+ struct dma_device *device = chan ? chan->device : NULL;
+ struct dma_async_tx_descriptor *tx;
+ unsigned char coefs[disks-2];
+ enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
+ dma_addr_t *dma_src = NULL;
+ int src_cnt = 0;
+
+ BUG_ON(disks < 4);
+
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) blocks;
+
+ if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+ is_dma_pq_aligned(device, offset, 0, len)) {
+ struct device *dev = device->dev;
+ dma_addr_t *pq = &dma_src[disks-2];
+ int i;
+
+ pr_debug("%s: (async) disks: %d len: %zu\n",
+ __func__, disks, len);
+ if (!P(blocks, disks))
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ else
+ pq[0] = dma_map_page(dev, P(blocks, disks),
+ offset, len,
+ DMA_TO_DEVICE);
+ if (!Q(blocks, disks))
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+ else
+ pq[1] = dma_map_page(dev, Q(blocks, disks),
+ offset, len,
+ DMA_TO_DEVICE);
+
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ for (i = 0; i < disks-2; i++)
+ if (likely(blocks[i])) {
+ dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ coefs[src_cnt] = raid6_gfexp[i];
+ src_cnt++;
+ }
+
+ for (;;) {
+ tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+ src_cnt,
+ coefs,
+ len, pqres,
+ dma_flags);
+ if (likely(tx))
+ break;
+ async_tx_quiesce(&submit->depend_tx);
+ dma_async_issue_pending(chan);
+ }
+ async_tx_submit(chan, tx, submit);
+
+ return tx;
+ } else {
+ struct page *p_src = P(blocks, disks);
+ struct page *q_src = Q(blocks, disks);
+ enum async_tx_flags flags_orig = submit->flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ void *scribble = submit->scribble;
+ void *cb_param_orig = submit->cb_param;
+ void *p, *q, *s;
+
+ pr_debug("%s: (sync) disks: %d len: %zu\n",
+ __func__, disks, len);
+
+ /* caller must provide a temporary result buffer and
+ * allow the input parameters to be preserved
+ */
+ BUG_ON(!spare || !scribble);
+
+ /* wait for any prerequisite operations */
+ async_tx_quiesce(&submit->depend_tx);
+
+ /* recompute p and/or q into the temporary buffer and then
+ * check to see the result matches the current value
+ */
+ tx = NULL;
+ *pqres = 0;
+ if (p_src) {
+ init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ NULL, NULL, scribble);
+ tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+ async_tx_quiesce(&tx);
+ p = page_address(p_src) + offset;
+ s = page_address(spare) + offset;
+ *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
+ }
+
+ if (q_src) {
+ P(blocks, disks) = NULL;
+ Q(blocks, disks) = spare;
+ init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
+ tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+ async_tx_quiesce(&tx);
+ q = page_address(q_src) + offset;
+ s = page_address(spare) + offset;
+ *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
+ }
+
+ /* restore P, Q and submit */
+ P(blocks, disks) = p_src;
+ Q(blocks, disks) = q_src;
+
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
+ submit->flags = flags_orig;
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(async_syndrome_val);
+
+static int __init async_pq_init(void)
+{
+ pq_scribble_page = alloc_page(GFP_KERNEL);
+
+ if (pq_scribble_page)
+ return 0;
+
+ pr_err("%s: failed to allocate required spare page\n", __func__);
+
+ return -ENOMEM;
+}
+
+static void __exit async_pq_exit(void)
+{
+ put_page(pq_scribble_page);
+}
+
+module_init(async_pq_init);
+module_exit(async_pq_exit);
+
+MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
new file mode 100644
index 000000000000..ce038d861eb9
--- /dev/null
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -0,0 +1,505 @@
+/*
+ * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
+ * Copyright(c) 2009 Intel Corporation
+ *
+ * based on raid6recov.c:
+ * Copyright 2002 H. Peter Anvin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/raid/pq.h>
+#include <linux/async_tx.h>
+
+static struct dma_async_tx_descriptor *
+async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
+ size_t len, struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &dest, 1, srcs, 2, len);
+ struct dma_device *dma = chan ? chan->device : NULL;
+ const u8 *amul, *bmul;
+ u8 ax, bx;
+ u8 *a, *b, *c;
+
+ if (dma) {
+ dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[2];
+ struct device *dev = dma->dev;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
+ dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
+ len, dma_flags);
+ if (tx) {
+ async_tx_submit(chan, tx, submit);
+ return tx;
+ }
+
+ /* could not get a descriptor, unmap and fall through to
+ * the synchronous path
+ */
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
+ }
+
+ /* run the operation synchronously */
+ async_tx_quiesce(&submit->depend_tx);
+ amul = raid6_gfmul[coef[0]];
+ bmul = raid6_gfmul[coef[1]];
+ a = page_address(srcs[0]);
+ b = page_address(srcs[1]);
+ c = page_address(dest);
+
+ while (len--) {
+ ax = amul[*a++];
+ bx = bmul[*b++];
+ *c++ = ax ^ bx;
+ }
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
+ struct async_submit_ctl *submit)
+{
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
+ &dest, 1, &src, 1, len);
+ struct dma_device *dma = chan ? chan->device : NULL;
+ const u8 *qmul; /* Q multiplier table */
+ u8 *d, *s;
+
+ if (dma) {
+ dma_addr_t dma_dest[2];
+ dma_addr_t dma_src[1];
+ struct device *dev = dma->dev;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
+
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
+ dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
+ tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
+ len, dma_flags);
+ if (tx) {
+ async_tx_submit(chan, tx, submit);
+ return tx;
+ }
+
+ /* could not get a descriptor, unmap and fall through to
+ * the synchronous path
+ */
+ dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ }
+
+ /* no channel available, or failed to allocate a descriptor, so
+ * perform the operation synchronously
+ */
+ async_tx_quiesce(&submit->depend_tx);
+ qmul = raid6_gfmul[coef];
+ d = page_address(dest);
+ s = page_address(src);
+
+ while (len--)
+ *d++ = qmul[*s++];
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *a, *b;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+
+ a = blocks[faila];
+ b = blocks[failb];
+
+ /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = p;
+ srcs[1] = q;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(b, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = p;
+ srcs[1] = b;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(a, srcs, 0, 2, bytes, submit);
+
+ return tx;
+
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *g, *dp, *dq;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+ int good_srcs, good, i;
+
+ good_srcs = 0;
+ good = -1;
+ for (i = 0; i < disks-2; i++) {
+ if (blocks[i] == NULL)
+ continue;
+ if (i == faila || i == failb)
+ continue;
+ good = i;
+ good_srcs++;
+ }
+ BUG_ON(good_srcs > 1);
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+ g = blocks[good];
+
+ /* Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for delta p and
+ * delta q
+ */
+ dp = blocks[faila];
+ dq = blocks[failb];
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_memcpy(dp, g, 0, 0, bytes, submit);
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
+
+ /* compute P + Pxy */
+ srcs[0] = dp;
+ srcs[1] = p;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ /* compute Q + Qxy */
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(dq, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
+__2data_recov_n(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *dp, *dq;
+ struct page *srcs[2];
+ unsigned char coef[2];
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = blocks[faila];
+ blocks[faila] = NULL;
+ blocks[disks-2] = dp;
+ dq = blocks[failb];
+ blocks[failb] = NULL;
+ blocks[disks-1] = dq;
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
+
+ /* Restore pointer table */
+ blocks[faila] = dp;
+ blocks[failb] = dq;
+ blocks[disks-2] = p;
+ blocks[disks-1] = q;
+
+ /* compute P + Pxy */
+ srcs[0] = dp;
+ srcs[1] = p;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ /* compute Q + Qxy */
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ /* Dx = A*(P+Pxy) + B*(Q+Qxy) */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ coef[0] = raid6_gfexi[failb-faila];
+ coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_sum_product(dq, srcs, coef, bytes, submit);
+
+ /* Dy = P+Pxy+Dx */
+ srcs[0] = dp;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(dp, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+
+/**
+ * async_raid6_2data_recov - asynchronously calculate two missing data blocks
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: block size
+ * @faila: first failed drive index
+ * @failb: second failed drive index
+ * @blocks: array of source pointers where the last two entries are p and q
+ * @submit: submission/completion modifiers
+ */
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ void *scribble = submit->scribble;
+ int non_zero_srcs, i;
+
+ BUG_ON(faila == failb);
+ if (failb < faila)
+ swap(faila, failb);
+
+ pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
+
+ /* if a dma resource is not available or a scribble buffer is not
+ * available punt to the synchronous path. In the 'dma not
+ * available' case be sure to use the scribble buffer to
+ * preserve the content of 'blocks' as the caller intended.
+ */
+ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+ void **ptrs = scribble ? scribble : (void **) blocks;
+
+ async_tx_quiesce(&submit->depend_tx);
+ for (i = 0; i < disks; i++)
+ if (blocks[i] == NULL)
+ ptrs[i] = (void *) raid6_empty_zero_page;
+ else
+ ptrs[i] = page_address(blocks[i]);
+
+ raid6_2data_recov(disks, bytes, faila, failb, ptrs);
+
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+
+ non_zero_srcs = 0;
+ for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+ if (blocks[i])
+ non_zero_srcs++;
+ switch (non_zero_srcs) {
+ case 0:
+ case 1:
+ /* There must be at least 2 sources - the failed devices. */
+ BUG();
+
+ case 2:
+ /* dma devices do not uniformly understand a zero source pq
+ * operation (in contrast to the synchronous case), so
+ * explicitly handle the special case of a 4 disk array with
+ * both data disks missing.
+ */
+ return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+ case 3:
+ /* dma devices do not uniformly understand a single
+ * source pq operation (in contrast to the synchronous
+ * case), so explicitly handle the special case of a 5 disk
+ * array with 2 of 3 data disks missing.
+ */
+ return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
+ default:
+ return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
+ }
+}
+EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
+
+/**
+ * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
+ * @disks: number of disks in the RAID-6 array
+ * @bytes: block size
+ * @faila: failed drive index
+ * @blocks: array of source pointers where the last two entries are p and q
+ * @submit: submission/completion modifiers
+ */
+struct dma_async_tx_descriptor *
+async_raid6_datap_recov(int disks, size_t bytes, int faila,
+ struct page **blocks, struct async_submit_ctl *submit)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ struct page *p, *q, *dq;
+ u8 coef;
+ enum async_tx_flags flags = submit->flags;
+ dma_async_tx_callback cb_fn = submit->cb_fn;
+ void *cb_param = submit->cb_param;
+ void *scribble = submit->scribble;
+ int good_srcs, good, i;
+ struct page *srcs[2];
+
+ pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
+
+ /* if a dma resource is not available or a scribble buffer is not
+ * available punt to the synchronous path. In the 'dma not
+ * available' case be sure to use the scribble buffer to
+ * preserve the content of 'blocks' as the caller intended.
+ */
+ if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+ void **ptrs = scribble ? scribble : (void **) blocks;
+
+ async_tx_quiesce(&submit->depend_tx);
+ for (i = 0; i < disks; i++)
+ if (blocks[i] == NULL)
+ ptrs[i] = (void*)raid6_empty_zero_page;
+ else
+ ptrs[i] = page_address(blocks[i]);
+
+ raid6_datap_recov(disks, bytes, faila, ptrs);
+
+ async_tx_sync_epilog(submit);
+
+ return NULL;
+ }
+
+ good_srcs = 0;
+ good = -1;
+ for (i = 0; i < disks-2; i++) {
+ if (i == faila)
+ continue;
+ if (blocks[i]) {
+ good = i;
+ good_srcs++;
+ if (good_srcs > 1)
+ break;
+ }
+ }
+ BUG_ON(good_srcs == 0);
+
+ p = blocks[disks-2];
+ q = blocks[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = blocks[faila];
+ blocks[faila] = NULL;
+ blocks[disks-1] = dq;
+
+ /* in the 4-disk case we only need to perform a single source
+ * multiplication with the one good data block.
+ */
+ if (good_srcs == 1) {
+ struct page *g = blocks[good];
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_memcpy(p, g, 0, 0, bytes, submit);
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
+ } else {
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
+ scribble);
+ tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
+ }
+
+ /* Restore pointer table */
+ blocks[faila] = dq;
+ blocks[disks-1] = q;
+
+ /* calculate g^{-faila} */
+ coef = raid6_gfinv[raid6_gfexp[faila]];
+
+ srcs[0] = dq;
+ srcs[1] = q;
+ init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ NULL, NULL, scribble);
+ tx = async_xor(dq, srcs, 0, 2, bytes, submit);
+
+ init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
+ tx = async_mult(dq, dq, coef, bytes, submit);
+
+ srcs[0] = p;
+ srcs[1] = dq;
+ init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
+ cb_param, scribble);
+ tx = async_xor(p, srcs, 0, 2, bytes, submit);
+
+ return tx;
+}
+EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
+
+MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
+MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
+MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 06eb6cc09fef..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -42,16 +42,21 @@ static void __exit async_tx_exit(void)
async_dmaengine_put();
}
+module_init(async_tx_init);
+module_exit(async_tx_exit);
+
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
- * @depend_tx: transaction dependency
+ * @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
- enum dma_transaction_type tx_type)
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type)
{
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
/* see if we can keep the chain on one channel */
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -59,17 +64,6 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
-#else
-static int __init async_tx_init(void)
-{
- printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
- return 0;
-}
-
-static void __exit async_tx_exit(void)
-{
- do { } while (0);
-}
#endif
@@ -83,24 +77,23 @@ static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx)
{
- struct dma_chan *chan;
- struct dma_device *device;
+ struct dma_chan *chan = depend_tx->chan;
+ struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
/* first check to see if we can still append to depend_tx */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent && depend_tx->chan == tx->chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+ txd_chain(depend_tx, tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
- if (!intr_tx)
+ /* attached dependency, flush the parent channel */
+ if (!intr_tx) {
+ device->device_issue_pending(chan);
return;
-
- chan = depend_tx->chan;
- device = chan->device;
+ }
/* see if we can schedule an interrupt
* otherwise poll for completion
@@ -113,27 +106,26 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
if (intr_tx) {
intr_tx->callback = NULL;
intr_tx->callback_param = NULL;
- tx->parent = intr_tx;
- /* safe to set ->next outside the lock since we know we are
+ /* safe to chain outside the lock since we know we are
* not submitted yet
*/
- intr_tx->next = tx;
+ txd_chain(intr_tx, tx);
/* check if we need to append */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
- intr_tx->parent = depend_tx;
- depend_tx->next = intr_tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
+ txd_chain(depend_tx, intr_tx);
async_tx_ack(intr_tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
if (intr_tx) {
- intr_tx->parent = NULL;
+ txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
+ device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
@@ -144,13 +136,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
/**
- * submit_disposition - while holding depend_tx->lock we must avoid submitting
- * new operations to prevent a circular locking dependency with
- * drivers that already hold a channel lock when calling
- * async_tx_run_dependencies.
+ * submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ *
+ * while holding depend_tx->lock we must avoid submitting new operations
+ * to prevent a circular locking dependency with drivers that already
+ * hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
ASYNC_TX_SUBMITTED,
@@ -160,11 +153,12 @@ enum submit_disposition {
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
- enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ struct async_submit_ctl *submit)
{
- tx->callback = cb_fn;
- tx->callback_param = cb_param;
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
+ tx->callback = submit->cb_fn;
+ tx->callback_param = submit->cb_param;
if (depend_tx) {
enum submit_disposition s;
@@ -175,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
- BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
- tx->parent);
+ BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+ txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
*/
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly
* if we are staying on the same channel: append
* else: channel switch
*/
if (depend_tx->chan == chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_chain(depend_tx, tx);
s = ASYNC_TX_SUBMITTED;
} else
s = ASYNC_TX_CHANNEL_SWITCH;
@@ -202,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
else
s = ASYNC_TX_CHANNEL_SWITCH;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
switch (s) {
case ASYNC_TX_SUBMITTED:
@@ -211,39 +204,38 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
async_tx_channel_switch(depend_tx, tx);
break;
case ASYNC_TX_DIRECT_SUBMIT:
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
break;
}
} else {
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
}
- if (flags & ASYNC_TX_ACK)
+ if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx);
- if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
+ if (depend_tx)
async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
- * async_trigger_callback - schedules the callback function to be run after
- * any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: 'callback' requires the completion of this transaction
- * @cb_fn: function to call after depend_tx completes
- * @cb_param: parameter to pass to the callback routine
+ * async_trigger_callback - schedules the callback function to be run
+ * @submit: submission and completion parameters
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_trigger_callback(struct async_submit_ctl *submit)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *tx;
+ struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) {
chan = depend_tx->chan;
@@ -262,14 +254,14 @@ async_trigger_callback(enum async_tx_flags flags,
if (tx) {
pr_debug("%s: (async)\n", __func__);
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
return tx;
@@ -295,9 +287,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
-module_init(async_tx_init);
-module_exit(async_tx_exit);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 95fe2c8d6c51..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,55 +33,57 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len,
- enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
+ struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
- dma_addr_t *dma_src = (dma_addr_t *) src_list;
struct dma_async_tx_descriptor *tx = NULL;
int src_off = 0;
int i;
- dma_async_tx_callback _cb_fn;
- void *_cb_param;
- enum async_tx_flags async_flags;
+ dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+ void *cb_param_orig = submit->cb_param;
+ enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags;
- int xor_src_cnt;
+ int xor_src_cnt = 0;
dma_addr_t dma_dest;
/* map the dest bidirectional in case it is re-used as a source */
dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
for (i = 0; i < src_cnt; i++) {
/* only map the dest once */
+ if (!src_list[i])
+ continue;
if (unlikely(src_list[i] == dest)) {
- dma_src[i] = dma_dest;
+ dma_src[xor_src_cnt++] = dma_dest;
continue;
}
- dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
- len, DMA_TO_DEVICE);
+ dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+ len, DMA_TO_DEVICE);
}
+ src_cnt = xor_src_cnt;
while (src_cnt) {
- async_flags = flags;
+ submit->flags = flags_orig;
dma_flags = 0;
- xor_src_cnt = min(src_cnt, dma->max_xor);
+ xor_src_cnt = min(src_cnt, (int)dma->max_xor);
/* if we are submitting additional xors, leave the chain open,
* clear the callback parameters, and leave the destination
* buffer mapped
*/
if (src_cnt > xor_src_cnt) {
- async_flags &= ~ASYNC_TX_ACK;
+ submit->flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_FENCE;
dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
- _cb_fn = NULL;
- _cb_param = NULL;
+ submit->cb_fn = NULL;
+ submit->cb_param = NULL;
} else {
- _cb_fn = cb_fn;
- _cb_param = cb_param;
+ submit->cb_fn = cb_fn_orig;
+ submit->cb_param = cb_param_orig;
}
- if (_cb_fn)
+ if (submit->cb_fn)
dma_flags |= DMA_PREP_INTERRUPT;
-
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward progress
* in case they can not provide a descriptor
@@ -90,7 +92,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
xor_src_cnt, len, dma_flags);
if (unlikely(!tx))
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
/* spin wait for the preceding transactions to complete */
while (unlikely(!tx)) {
@@ -101,11 +103,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags);
}
- async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
- _cb_param);
-
- depend_tx = tx;
- flags |= ASYNC_TX_DEP_ACK;
+ async_tx_submit(chan, tx, submit);
+ submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
@@ -124,23 +123,28 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum async_tx_flags flags,
- dma_async_tx_callback cb_fn, void *cb_param)
+ int src_cnt, size_t len, struct async_submit_ctl *submit)
{
int i;
- int xor_src_cnt;
+ int xor_src_cnt = 0;
int src_off = 0;
void *dest_buf;
- void **srcs = (void **) src_list;
+ void **srcs;
- /* reuse the 'src_list' array to convert to buffer pointers */
- for (i = 0; i < src_cnt; i++)
- srcs[i] = page_address(src_list[i]) + offset;
+ if (submit->scribble)
+ srcs = submit->scribble;
+ else
+ srcs = (void **) src_list;
+ /* convert to buffer pointers */
+ for (i = 0; i < src_cnt; i++)
+ if (src_list[i])
+ srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+ src_cnt = xor_src_cnt;
/* set destination address */
dest_buf = page_address(dest) + offset;
- if (flags & ASYNC_TX_XOR_ZERO_DST)
+ if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
memset(dest_buf, 0, len);
while (src_cnt > 0) {
@@ -153,61 +157,70 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
src_off += xor_src_cnt;
}
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
}
/**
* async_xor - attempt to xor a set of blocks with a dma engine.
- * xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- * flag must be set to not include dest data in the calculation. The
- * assumption with dma eninges is that they only use the destination
- * buffer as a source when it is explicity specified in the source list.
* @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- * at index zero). The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation. The assumption with dma engines is that they only
+ * use the destination buffer as a source when it is explicitly specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
*/
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+ int src_cnt, size_t len, struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+ struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
+ dma_addr_t *dma_src = NULL;
+
BUG_ON(src_cnt <= 1);
- if (chan) {
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
+
+ if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
- flags, depend_tx, cb_fn, cb_param);
+ dma_src, submit);
} else {
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
+ WARN_ONCE(chan, "%s: no space for dma address conversion\n",
+ __func__);
/* in the sync case the dest is an implied source
* (assumes the dest is the first source)
*/
- if (flags & ASYNC_TX_XOR_DROP_DST) {
+ if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
}
/* wait for any prerequisite operations */
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
- do_sync_xor(dest, src_list, offset, src_cnt, len,
- flags, cb_fn, cb_param);
+ do_sync_xor(dest, src_list, offset, src_cnt, len, submit);
return NULL;
}
@@ -221,105 +234,104 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
memcmp(a, a + 4, len - 4) == 0);
}
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+ struct page **src_list, int src_cnt, size_t len)
+{
+ #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+ return NULL;
+ #endif
+ return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+ src_cnt, len);
+}
+
/**
- * async_xor_zero_sum - attempt a xor parity check with a dma engine.
+ * async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages. The dest page must be listed as a source
- * at index zero. The contents of this array may be overwritten.
+ * @src_list: array of source pages
* @offset: offset in pages to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
*/
struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len,
- u32 *result, enum async_tx_flags flags,
- struct dma_async_tx_descriptor *depend_tx,
- dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
- &dest, 1, src_list,
- src_cnt, len);
+ struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
+ dma_addr_t *dma_src = NULL;
BUG_ON(src_cnt <= 1);
- if (device && src_cnt <= device->max_xor) {
- dma_addr_t *dma_src = (dma_addr_t *) src_list;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ if (submit->scribble)
+ dma_src = submit->scribble;
+ else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+ dma_src = (dma_addr_t *) src_list;
+
+ if (dma_src && device && src_cnt <= device->max_xor &&
+ is_dma_xor_aligned(device, offset, 0, len)) {
+ unsigned long dma_prep_flags = 0;
int i;
pr_debug("%s: (async) len: %zu\n", __func__, len);
+ if (submit->cb_fn)
+ dma_prep_flags |= DMA_PREP_INTERRUPT;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_prep_flags |= DMA_PREP_FENCE;
for (i = 0; i < src_cnt; i++)
dma_src[i] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
- tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
- len, result,
- dma_prep_flags);
+ tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+ len, result,
+ dma_prep_flags);
if (unlikely(!tx)) {
- async_tx_quiesce(&depend_tx);
+ async_tx_quiesce(&submit->depend_tx);
while (!tx) {
dma_async_issue_pending(chan);
- tx = device->device_prep_dma_zero_sum(chan,
+ tx = device->device_prep_dma_xor_val(chan,
dma_src, src_cnt, len, result,
dma_prep_flags);
}
}
- async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+ async_tx_submit(chan, tx, submit);
} else {
- unsigned long xor_flags = flags;
+ enum async_tx_flags flags_orig = submit->flags;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
+ WARN_ONCE(device && src_cnt <= device->max_xor,
+ "%s: no space for dma address conversion\n",
+ __func__);
- xor_flags |= ASYNC_TX_XOR_DROP_DST;
- xor_flags &= ~ASYNC_TX_ACK;
+ submit->flags |= ASYNC_TX_XOR_DROP_DST;
+ submit->flags &= ~ASYNC_TX_ACK;
- tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
- depend_tx, NULL, NULL);
+ tx = async_xor(dest, src_list, offset, src_cnt, len, submit);
async_tx_quiesce(&tx);
- *result = page_is_zero(dest, offset, len) ? 0 : 1;
+ *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;
- async_tx_sync_epilog(cb_fn, cb_param);
+ async_tx_sync_epilog(submit);
+ submit->flags = flags_orig;
}
return tx;
}
-EXPORT_SYMBOL_GPL(async_xor_zero_sum);
-
-static int __init async_xor_init(void)
-{
- #ifdef CONFIG_DMA_ENGINE
- /* To conserve stack space the input src_list (array of page pointers)
- * is reused to hold the array of dma addresses passed to the driver.
- * This conversion is only possible when dma_addr_t is less than the
- * the size of a pointer. HIGHMEM64G is known to violate this
- * assumption.
- */
- BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
- #endif
-
- return 0;
-}
-
-static void __exit async_xor_exit(void)
-{
- do { } while (0);
-}
-
-module_init(async_xor_init);
-module_exit(async_xor_exit);
+EXPORT_SYMBOL_GPL(async_xor_val);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
new file mode 100644
index 000000000000..c1321935ebcc
--- /dev/null
+++ b/crypto/async_tx/raid6test.c
@@ -0,0 +1,248 @@
+/*
+ * asynchronous raid6 recovery self test
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * based on drivers/md/raid6test/test.c:
+ * Copyright 2002-2007 H. Peter Anvin
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/async_tx.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+
+#undef pr
+#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
+
+#define NDISKS 16 /* Including P and Q */
+
+static struct page *dataptrs[NDISKS];
+static addr_conv_t addr_conv[NDISKS];
+static struct page *data[NDISKS+3];
+static struct page *spare;
+static struct page *recovi;
+static struct page *recovj;
+
+static void callback(void *param)
+{
+ struct completion *cmp = param;
+
+ complete(cmp);
+}
+
+static void makedata(int disks)
+{
+ int i, j;
+
+ for (i = 0; i < disks; i++) {
+ for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) {
+ u32 *p = page_address(data[i]) + j;
+
+ *p = random32();
+ }
+
+ dataptrs[i] = data[i];
+ }
+}
+
+static char disk_type(int d, int disks)
+{
+ if (d == disks - 2)
+ return 'P';
+ else if (d == disks - 1)
+ return 'Q';
+ else
+ return 'D';
+}
+
+/* Recover two failed blocks. */
+static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
+{
+ struct async_submit_ctl submit;
+ struct completion cmp;
+ struct dma_async_tx_descriptor *tx = NULL;
+ enum sum_check_flags result = ~0;
+
+ if (faila > failb)
+ swap(faila, failb);
+
+ if (failb == disks-1) {
+ if (faila == disks-2) {
+ /* P+Q failure. Just rebuild the syndrome. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+ } else {
+ struct page *blocks[disks];
+ struct page *dest;
+ int count = 0;
+ int i;
+
+ /* data+Q failure. Reconstruct data from P,
+ * then rebuild syndrome
+ */
+ for (i = disks; i-- ; ) {
+ if (i == faila || i == failb)
+ continue;
+ blocks[count++] = ptrs[i];
+ }
+ dest = ptrs[faila];
+ init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+ NULL, NULL, addr_conv);
+ tx = async_xor(dest, blocks, 0, count, bytes, &submit);
+
+ init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
+ tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+ }
+ } else {
+ if (failb == disks-2) {
+ /* data+P failure. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
+ } else {
+ /* data+data failure. */
+ init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
+ tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
+ }
+ }
+ init_completion(&cmp);
+ init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
+ tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
+ async_tx_issue_pending(tx);
+
+ if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
+ pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
+ __func__, faila, failb, disks);
+
+ if (result != 0)
+ pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
+ __func__, faila, failb, result);
+}
+
+static int test_disks(int i, int j, int disks)
+{
+ int erra, errb;
+
+ memset(page_address(recovi), 0xf0, PAGE_SIZE);
+ memset(page_address(recovj), 0xba, PAGE_SIZE);
+
+ dataptrs[i] = recovi;
+ dataptrs[j] = recovj;
+
+ raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);
+
+ erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
+ errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
+
+ pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
+ __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
+ (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");
+
+ dataptrs[i] = data[i];
+ dataptrs[j] = data[j];
+
+ return erra || errb;
+}
+
+static int test(int disks, int *tests)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct async_submit_ctl submit;
+ struct completion cmp;
+ int err = 0;
+ int i, j;
+
+ recovi = data[disks];
+ recovj = data[disks+1];
+ spare = data[disks+2];
+
+ makedata(disks);
+
+ /* Nuke syndromes */
+ memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
+ memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
+
+ /* Generate assumed good syndrome */
+ init_completion(&cmp);
+ init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
+ tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
+ async_tx_issue_pending(tx);
+
+ if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
+ pr("error: initial gen_syndrome(%d) timed out\n", disks);
+ return 1;
+ }
+
+ pr("testing the %d-disk case...\n", disks);
+ for (i = 0; i < disks-1; i++)
+ for (j = i+1; j < disks; j++) {
+ (*tests)++;
+ err += test_disks(i, j, disks);
+ }
+
+ return err;
+}
+
+
+static int raid6_test(void)
+{
+ int err = 0;
+ int tests = 0;
+ int i;
+
+ for (i = 0; i < NDISKS+3; i++) {
+ data[i] = alloc_page(GFP_KERNEL);
+ if (!data[i]) {
+ while (i--)
+ put_page(data[i]);
+ return -ENOMEM;
+ }
+ }
+
+ /* the 4-disk and 5-disk cases are special for the recovery code */
+ if (NDISKS > 4)
+ err += test(4, &tests);
+ if (NDISKS > 5)
+ err += test(5, &tests);
+ /* the 11 and 12 disk cases are special for ioatdma (p-disabled
+ * q-continuation without extended descriptor)
+ */
+ if (NDISKS > 12) {
+ err += test(11, &tests);
+ err += test(12, &tests);
+ }
+ err += test(NDISKS, &tests);
+
+ pr("\n");
+ pr("complete (%d tests, %d failure%s)\n",
+ tests, err, err == 1 ? "" : "s");
+
+ for (i = 0; i < NDISKS+3; i++)
+ put_page(data[i]);
+
+ return 0;
+}
+
+static void raid6_test_exit(void)
+{
+}
+
+/* when compiled-in wait for drivers to load first (assumes dma drivers
+ * are also compiled-in)
+ */
+late_initcall(raid6_test);
+module_exit(raid6_test_exit);
+MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
+MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
+MODULE_LICENSE("GPL");
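
For orientation, the double loop in test() above walks every unordered pair of
disks, so one pass over an n-disk array performs n*(n-1)/2 dual-failure
recoveries (120 for the default 16-disk case, plus the smaller special-case
arrays). A minimal stand-alone sketch of just that enumeration, in plain
user-space C with no DMA offload and purely illustrative names:

    #include <stdio.h>

    /* Count the (faila, failb) pairs that test() exercises for one array
     * size; mirrors the i/j double loop in the module above. */
    static int pair_count(int disks)
    {
        int i, j, n = 0;

        for (i = 0; i < disks - 1; i++)
            for (j = i + 1; j < disks; j++)
                n++;    /* one raid6_dual_recov() per pair */
        return n;
    }

    int main(void)
    {
        /* 16 disks (including P and Q) -> 120 dual-failure combinations */
        printf("%d\n", pair_count(16));
        return 0;
    }
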
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5793b64c81a8..a5a22cfcd07b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -23,24 +23,42 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
+
struct authenc_instance_ctx {
- struct crypto_spawn auth;
+ struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_ctx {
- spinlock_t auth_lock;
- struct crypto_hash *auth;
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
};
+struct authenc_request_ctx {
+ unsigned int cryptlen;
+ struct scatterlist *sg;
+ struct scatterlist asg[2];
+ struct scatterlist cipher[2];
+ crypto_completion_t complete;
+ crypto_completion_t update_complete;
+ char tail[];
+};
+
+static void authenc_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
+ struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
@@ -64,11 +82,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
authkeylen = keylen - enckeylen;
- crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) &
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(auth, key, authkeylen);
- crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) &
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
if (err)
@@ -103,40 +121,202 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
sg_mark_end(head);
}
-static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ authenc_request_complete(req, err);
+}
+
+static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+ int err)
{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
- struct hash_desc desc = {
- .tfm = auth,
- .flags = aead_request_flags(req) & flags,
- };
- u8 *hash = aead_request_ctx(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
+
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ authenc_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth),
- crypto_hash_alignmask(auth) + 1);
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
- spin_lock_bh(&ctx->auth_lock);
- err = crypto_hash_init(&desc);
+ err = crypto_ahash_init(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete, req);
- err = crypto_hash_update(&desc, req->assoc, req->assoclen);
+ err = crypto_ahash_update(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
- err = crypto_hash_update(&desc, cipher, cryptlen);
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
- err = crypto_hash_final(&desc, hash);
-auth_unlock:
- spin_unlock_bh(&ctx->auth_lock);
+ return hash;
+}
+static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_digest(ahreq);
if (err)
return ERR_PTR(err);
@@ -147,11 +327,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *dst = req->dst;
- struct scatterlist cipher[2];
- struct page *dstp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
- unsigned int cryptlen;
+ unsigned int cryptlen = req->cryptlen;
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *dstp;
u8 *vdst;
u8 *hash;
@@ -163,10 +347,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, dst, vdst == iv + ivsize);
dst = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, dst, 0);
+ dst = asg;
+ cryptlen += req->assoclen;
}
- cryptlen = req->cryptlen + ivsize;
- hash = crypto_authenc_hash(req, flags, dst, cryptlen);
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = dst;
+
+ areq_ctx->complete = authenc_geniv_ahash_done;
+ areq_ctx->update_complete = authenc_geniv_ahash_update_done;
+
+ hash = authenc_ahash_fn(req, flags);
if (IS_ERR(hash))
return PTR_ERR(hash);
@@ -190,18 +389,20 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
err = crypto_authenc_genicv(areq, iv, 0);
}
- aead_request_complete(areq, err);
+ authenc_request_complete(areq, err);
}
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct ablkcipher_request *abreq = aead_request_ctx(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_ablkcipher *enc = ctx->enc;
struct scatterlist *dst = req->dst;
unsigned int cryptlen = req->cryptlen;
- u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
+ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ + ctx->reqoff);
+ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
int err;
ablkcipher_request_set_tfm(abreq, enc);
@@ -229,7 +430,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
err = crypto_authenc_genicv(areq, greq->giv, 0);
}
- aead_request_complete(areq, err);
+ authenc_request_complete(areq, err);
}
static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
@@ -256,33 +457,40 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
}
static int crypto_authenc_verify(struct aead_request *req,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+ authenc_ahash_t authenc_ahash_fn)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
u8 *ohash;
u8 *ihash;
unsigned int authsize;
- ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
- cryptlen);
+ areq_ctx->complete = authenc_verify_ahash_done;
+ areq_ctx->update_complete = authenc_verify_ahash_update_done;
+
+ ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
return PTR_ERR(ohash);
authsize = crypto_aead_authsize(authenc);
ihash = ohash + authsize;
- scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
- return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+ return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
unsigned int cryptlen)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *src = req->src;
- struct scatterlist cipher[2];
- struct page *srcp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *srcp;
u8 *vsrc;
srcp = sg_page(src);
@@ -293,9 +501,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, src, vsrc == iv + ivsize);
src = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, src, 0);
+ src = asg;
+ cryptlen += req->assoclen;
}
- return crypto_authenc_verify(req, src, cryptlen + ivsize);
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = src;
+
+ return crypto_authenc_verify(req, authenc_ahash_fn);
}
static int crypto_authenc_decrypt(struct aead_request *req)
@@ -326,38 +547,42 @@ static int crypto_authenc_decrypt(struct aead_request *req)
static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *auth;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
int err;
- auth = crypto_spawn_hash(&ictx->auth);
+ auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
- goto err_free_hash;
+ goto err_free_ahash;
ctx->auth = auth;
ctx->enc = enc;
- tfm->crt_aead.reqsize = max_t(unsigned int,
- (crypto_hash_alignmask(auth) &
- ~(crypto_tfm_ctx_alignment() - 1)) +
- crypto_hash_digestsize(auth) * 2,
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc) +
- crypto_ablkcipher_ivsize(enc));
- spin_lock_init(&ctx->auth_lock);
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1) +
+ crypto_ablkcipher_ivsize(enc);
+
+ tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc));
return 0;
-err_free_hash:
- crypto_free_hash(auth);
+err_free_ahash:
+ crypto_free_ahash(auth);
return err;
}
@@ -365,7 +590,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->auth);
+ crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc);
}
@@ -373,7 +598,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
- struct crypto_alg *auth;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
struct crypto_alg *enc;
struct authenc_instance_ctx *ctx;
const char *enc_name;
@@ -387,10 +613,12 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
- auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
- return ERR_PTR(PTR_ERR(auth));
+ return ERR_CAST(auth);
+
+ auth_base = &auth->base;
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
@@ -404,7 +632,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err)
goto err_free_inst;
@@ -419,28 +647,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
+ "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_driver_name,
+ "authenc(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
+ inst->alg.cra_priority = enc->cra_priority *
+ 10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize;
- inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
- inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
- auth->cra_hash.digestsize :
- auth->cra_type ?
- __crypto_shash_alg(auth)->digestsize :
- auth->cra_digest.dia_digestsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
@@ -453,13 +678,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
out:
- crypto_mod_put(auth);
+ crypto_mod_put(auth_base);
return inst;
err_drop_enc:
crypto_drop_skcipher(&ctx->enc);
err_drop_auth:
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
err_free_inst:
kfree(inst);
out_put_auth:
@@ -472,7 +697,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
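
The reqoff computation in crypto_authenc_init_tfm() above lays out the
per-request tail[] area: two digest-sized buffers (the computed and the
received ICV) rounded up to the hash alignment, then the cipher IV, then the
larger of the ahash request and the skcipher givcrypt request. A rough
stand-alone illustration of that arithmetic, using made-up example sizes
(20-byte digest, alignmask 15, 16-byte IV, hypothetical sub-request sizes)
rather than values queried from the crypto API:

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Example figures only, not taken from any real algorithm. */
        unsigned int digestsize = 20, alignmask = 15, ivsize = 16;
        unsigned int ahash_req = 96, givcrypt_req = 128;

        /* Mirrors ALIGN(2 * digestsize + alignmask, alignmask + 1) + ivsize */
        unsigned int reqoff = ALIGN_UP(2 * digestsize + alignmask,
                                       alignmask + 1) + ivsize;
        unsigned int tail = reqoff +
            (ahash_req > givcrypt_req ? ahash_req : givcrypt_req);

        printf("reqoff=%u, tail size=%u\n", reqoff, tail);
        return 0;
    }
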
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 90d26c91f4e9..7a7219266e3c 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
memcpy(walk->dst.virt.addr, walk->page, n);
blkcipher_unmap_dst(walk);
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
- blkcipher_unmap_src(walk);
if (walk->flags & BLKCIPHER_WALK_DIFF)
blkcipher_unmap_dst(walk);
+ blkcipher_unmap_src(walk);
}
scatterwalk_advance(&walk->in, n);
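
The one-hunk change to blkcipher_done_fast() above reverses the teardown order
so the destination is unmapped before the source. With stacked atomic mappings
the unmaps generally have to mirror the map order (the source was mapped
first, so it should be released last); the sketch below shows only that
last-in, first-out discipline and is not the kernel's kmap code:

    #include <stdio.h>

    /* Schematic stand-ins for the real atomic map/unmap helpers. */
    static const char *map(const char *what)
    {
        printf("map   %s\n", what);
        return what;
    }

    static void unmap(const char *what)
    {
        printf("unmap %s\n", what);
    }

    int main(void)
    {
        const char *src = map("src");   /* mapped first  */
        const char *dst = map("dst");   /* mapped second */

        /* ... copy between dst and src ... */

        unmap(dst);                     /* released in reverse order:  */
        unmap(src);                     /* last mapped, first unmapped */
        return 0;
    }
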
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index 6f5b48731922..a67d52ee0580 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -1,4 +1,4 @@
-/*
+/*
* Cryptographic API.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
@@ -299,7 +299,7 @@ static const u32 bf_sbox[256 * 4] = {
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
};
-/*
+/*
* Round loop unrolling macros, S is a pointer to a S-Box array
* organized in 4 unsigned longs at a row.
*/
@@ -315,7 +315,7 @@ static const u32 bf_sbox[256 * 4] = {
/*
* The blowfish encipher, processes 64-bit blocks.
- * NOTE: This function MUSTN'T respect endianess
+ * NOTE: This function MUSTN'T respect endianess
*/
static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
{
@@ -395,7 +395,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
out_blk[1] = cpu_to_be32(yl);
}
-/*
+/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -417,10 +417,10 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
/* Actual subkey generation */
for (j = 0, i = 0; i < 16 + 2; i++) {
- temp = (((u32 )key[j] << 24) |
- ((u32 )key[(j + 1) % keylen] << 16) |
- ((u32 )key[(j + 2) % keylen] << 8) |
- ((u32 )key[(j + 3) % keylen]));
+ temp = (((u32)key[j] << 24) |
+ ((u32)key[(j + 1) % keylen] << 16) |
+ ((u32)key[(j + 2) % keylen] << 8) |
+ ((u32)key[(j + 3) % keylen]));
P[i] = P[i] ^ temp;
j = (j + 4) % keylen;
@@ -444,7 +444,7 @@ static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
S[count + 1] = data[1];
}
}
-
+
/* Bruce says not to bother with the weak key check. */
return 0;
}
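
Beyond the whitespace cleanup, the subkey loop in bf_setkey() above assembles
each of the 18 P-array words big-endian from four key bytes, wrapping around
the user key when it is shorter than 72 bytes. A small stand-alone sketch of
just that byte-gathering step, separate from the rest of the key schedule:

    #include <stdio.h>
    #include <stdint.h>

    /* Gather one 32-bit word from the key starting at offset j, wrapping
     * around the key the same way the loop in bf_setkey() does. */
    static uint32_t bf_key_word(const uint8_t *key, int keylen, int j)
    {
        return ((uint32_t)key[j % keylen] << 24) |
               ((uint32_t)key[(j + 1) % keylen] << 16) |
               ((uint32_t)key[(j + 2) % keylen] << 8) |
               ((uint32_t)key[(j + 3) % keylen]);
    }

    int main(void)
    {
        const uint8_t key[] = "TESTKEY";        /* 7-byte example key */
        int keylen = sizeof(key) - 1;
        int i, j;

        for (i = 0, j = 0; i < 16 + 2; i++, j = (j + 4) % keylen)
            printf("P[%2d] ^= %08x\n", i,
                   (unsigned int)bf_key_word(key, keylen, j));
        return 0;
    }
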
diff --git a/crypto/camellia.c b/crypto/camellia.c
index 964635d163f4..64cff46ea5e4 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -39,271 +39,271 @@
#include <asm/unaligned.h>
static const u32 camellia_sp1110[256] = {
- 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
- 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
- 0xe4e4e400,0x85858500,0x57575700,0x35353500,
- 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100,
- 0x23232300,0xefefef00,0x6b6b6b00,0x93939300,
- 0x45454500,0x19191900,0xa5a5a500,0x21212100,
- 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00,
- 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00,
- 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00,
- 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00,
- 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00,
- 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00,
- 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00,
- 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00,
- 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600,
- 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00,
- 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600,
- 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00,
- 0x74747400,0x12121200,0x2b2b2b00,0x20202000,
- 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900,
- 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200,
- 0x34343400,0x7e7e7e00,0x76767600,0x05050500,
- 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100,
- 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700,
- 0x14141400,0x58585800,0x3a3a3a00,0x61616100,
- 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00,
- 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600,
- 0x53535300,0x18181800,0xf2f2f200,0x22222200,
- 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200,
- 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100,
- 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800,
- 0x60606000,0xfcfcfc00,0x69696900,0x50505000,
- 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00,
- 0xa1a1a100,0x89898900,0x62626200,0x97979700,
- 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500,
- 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200,
- 0x10101000,0xc4c4c400,0x00000000,0x48484800,
- 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00,
- 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00,
- 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400,
- 0x87878700,0x5c5c5c00,0x83838300,0x02020200,
- 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300,
- 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300,
- 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200,
- 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600,
- 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00,
- 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00,
- 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00,
- 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00,
- 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00,
- 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600,
- 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900,
- 0x78787800,0x98989800,0x06060600,0x6a6a6a00,
- 0xe7e7e700,0x46464600,0x71717100,0xbababa00,
- 0xd4d4d400,0x25252500,0xababab00,0x42424200,
- 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00,
- 0x72727200,0x07070700,0xb9b9b900,0x55555500,
- 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00,
- 0x36363600,0x49494900,0x2a2a2a00,0x68686800,
- 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400,
- 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00,
- 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100,
- 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400,
- 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00,
+ 0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
+ 0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500,
+ 0xe4e4e400, 0x85858500, 0x57575700, 0x35353500,
+ 0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100,
+ 0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300,
+ 0x45454500, 0x19191900, 0xa5a5a500, 0x21212100,
+ 0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00,
+ 0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00,
+ 0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00,
+ 0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00,
+ 0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00,
+ 0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00,
+ 0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00,
+ 0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00,
+ 0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600,
+ 0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00,
+ 0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600,
+ 0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00,
+ 0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000,
+ 0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900,
+ 0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200,
+ 0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500,
+ 0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100,
+ 0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700,
+ 0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100,
+ 0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00,
+ 0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600,
+ 0x53535300, 0x18181800, 0xf2f2f200, 0x22222200,
+ 0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200,
+ 0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100,
+ 0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800,
+ 0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000,
+ 0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00,
+ 0xa1a1a100, 0x89898900, 0x62626200, 0x97979700,
+ 0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500,
+ 0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200,
+ 0x10101000, 0xc4c4c400, 0x00000000, 0x48484800,
+ 0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00,
+ 0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00,
+ 0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400,
+ 0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200,
+ 0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300,
+ 0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300,
+ 0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200,
+ 0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600,
+ 0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00,
+ 0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00,
+ 0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00,
+ 0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00,
+ 0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00,
+ 0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600,
+ 0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900,
+ 0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00,
+ 0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00,
+ 0xd4d4d400, 0x25252500, 0xababab00, 0x42424200,
+ 0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00,
+ 0x72727200, 0x07070700, 0xb9b9b900, 0x55555500,
+ 0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00,
+ 0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800,
+ 0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400,
+ 0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00,
+ 0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100,
+ 0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400,
+ 0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00,
};
static const u32 camellia_sp0222[256] = {
- 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9,
- 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb,
- 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a,
- 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282,
- 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727,
- 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242,
- 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c,
- 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b,
- 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f,
- 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d,
- 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe,
- 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434,
- 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595,
- 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a,
- 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad,
- 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a,
- 0x00171717,0x001a1a1a,0x00353535,0x00cccccc,
- 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a,
- 0x00e8e8e8,0x00242424,0x00565656,0x00404040,
- 0x00e1e1e1,0x00636363,0x00090909,0x00333333,
- 0x00bfbfbf,0x00989898,0x00979797,0x00858585,
- 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a,
- 0x00dadada,0x006f6f6f,0x00535353,0x00626262,
- 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf,
- 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2,
- 0x00bdbdbd,0x00363636,0x00222222,0x00383838,
- 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c,
- 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444,
- 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565,
- 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323,
- 0x00484848,0x00101010,0x00d1d1d1,0x00515151,
- 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0,
- 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa,
- 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f,
- 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b,
- 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5,
- 0x00202020,0x00898989,0x00000000,0x00909090,
- 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7,
- 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5,
- 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929,
- 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404,
- 0x009b9b9b,0x00949494,0x00212121,0x00666666,
- 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7,
- 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5,
- 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c,
- 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676,
- 0x00030303,0x002d2d2d,0x00dedede,0x00969696,
- 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c,
- 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919,
- 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d,
- 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d,
- 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2,
- 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4,
- 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575,
- 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484,
- 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5,
- 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa,
- 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414,
- 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0,
- 0x00787878,0x00707070,0x00e3e3e3,0x00494949,
- 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6,
- 0x00777777,0x00939393,0x00868686,0x00838383,
- 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9,
- 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d,
+ 0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9,
+ 0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb,
+ 0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a,
+ 0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282,
+ 0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727,
+ 0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242,
+ 0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c,
+ 0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b,
+ 0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f,
+ 0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d,
+ 0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe,
+ 0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434,
+ 0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595,
+ 0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a,
+ 0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad,
+ 0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a,
+ 0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc,
+ 0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a,
+ 0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040,
+ 0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333,
+ 0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585,
+ 0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a,
+ 0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262,
+ 0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf,
+ 0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2,
+ 0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838,
+ 0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c,
+ 0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444,
+ 0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565,
+ 0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323,
+ 0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151,
+ 0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0,
+ 0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa,
+ 0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f,
+ 0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b,
+ 0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5,
+ 0x00202020, 0x00898989, 0x00000000, 0x00909090,
+ 0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7,
+ 0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5,
+ 0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929,
+ 0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404,
+ 0x009b9b9b, 0x00949494, 0x00212121, 0x00666666,
+ 0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7,
+ 0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5,
+ 0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c,
+ 0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676,
+ 0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696,
+ 0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c,
+ 0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919,
+ 0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d,
+ 0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d,
+ 0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2,
+ 0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4,
+ 0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575,
+ 0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484,
+ 0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5,
+ 0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa,
+ 0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414,
+ 0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0,
+ 0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949,
+ 0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6,
+ 0x00777777, 0x00939393, 0x00868686, 0x00838383,
+ 0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9,
+ 0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d,
};
static const u32 camellia_sp3033[256] = {
- 0x38003838,0x41004141,0x16001616,0x76007676,
- 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2,
- 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a,
- 0x75007575,0x06000606,0x57005757,0xa000a0a0,
- 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9,
- 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090,
- 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727,
- 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede,
- 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7,
- 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767,
- 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf,
- 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d,
- 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565,
- 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e,
- 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b,
- 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6,
- 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333,
- 0xfd00fdfd,0x66006666,0x58005858,0x96009696,
- 0x3a003a3a,0x09000909,0x95009595,0x10001010,
- 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc,
- 0xef00efef,0x26002626,0xe500e5e5,0x61006161,
- 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282,
- 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898,
- 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb,
- 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0,
- 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e,
- 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b,
- 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111,
- 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959,
- 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8,
- 0x12001212,0x04000404,0x74007474,0x54005454,
- 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828,
- 0x55005555,0x68006868,0x50005050,0xbe00bebe,
- 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb,
- 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca,
- 0x70007070,0xff00ffff,0x32003232,0x69006969,
- 0x08000808,0x62006262,0x00000000,0x24002424,
- 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded,
- 0x45004545,0x81008181,0x73007373,0x6d006d6d,
- 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a,
- 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101,
- 0xe600e6e6,0x25002525,0x48004848,0x99009999,
- 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9,
- 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171,
- 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313,
- 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d,
- 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5,
- 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717,
- 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646,
- 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747,
- 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b,
- 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac,
- 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535,
- 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d,
- 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121,
- 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d,
- 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa,
- 0x7c007c7c,0x77007777,0x56005656,0x05000505,
- 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434,
- 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252,
- 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd,
- 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0,
- 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a,
- 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f,
+ 0x38003838, 0x41004141, 0x16001616, 0x76007676,
+ 0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2,
+ 0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a,
+ 0x75007575, 0x06000606, 0x57005757, 0xa000a0a0,
+ 0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9,
+ 0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090,
+ 0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727,
+ 0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede,
+ 0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7,
+ 0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767,
+ 0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf,
+ 0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d,
+ 0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565,
+ 0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e,
+ 0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b,
+ 0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6,
+ 0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333,
+ 0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696,
+ 0x3a003a3a, 0x09000909, 0x95009595, 0x10001010,
+ 0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc,
+ 0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161,
+ 0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282,
+ 0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898,
+ 0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb,
+ 0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0,
+ 0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e,
+ 0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b,
+ 0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111,
+ 0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959,
+ 0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8,
+ 0x12001212, 0x04000404, 0x74007474, 0x54005454,
+ 0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828,
+ 0x55005555, 0x68006868, 0x50005050, 0xbe00bebe,
+ 0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb,
+ 0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca,
+ 0x70007070, 0xff00ffff, 0x32003232, 0x69006969,
+ 0x08000808, 0x62006262, 0x00000000, 0x24002424,
+ 0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded,
+ 0x45004545, 0x81008181, 0x73007373, 0x6d006d6d,
+ 0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a,
+ 0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101,
+ 0xe600e6e6, 0x25002525, 0x48004848, 0x99009999,
+ 0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9,
+ 0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171,
+ 0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313,
+ 0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d,
+ 0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5,
+ 0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717,
+ 0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646,
+ 0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747,
+ 0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b,
+ 0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac,
+ 0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535,
+ 0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d,
+ 0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121,
+ 0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d,
+ 0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa,
+ 0x7c007c7c, 0x77007777, 0x56005656, 0x05000505,
+ 0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434,
+ 0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252,
+ 0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd,
+ 0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0,
+ 0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a,
+ 0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f,
};
static const u32 camellia_sp4404[256] = {
- 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0,
- 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae,
- 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5,
- 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092,
- 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f,
- 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b,
- 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d,
- 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c,
- 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0,
- 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084,
- 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076,
- 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004,
- 0x14140014,0x3a3a003a,0xdede00de,0x11110011,
- 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2,
- 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a,
- 0x24240024,0xe8e800e8,0x60600060,0x69690069,
- 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062,
- 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064,
- 0x10100010,0x00000000,0xa3a300a3,0x75750075,
- 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd,
- 0x87870087,0x83830083,0xcdcd00cd,0x90900090,
- 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf,
- 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6,
- 0x81810081,0x6f6f006f,0x13130013,0x63630063,
- 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc,
- 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4,
- 0x78780078,0x06060006,0xe7e700e7,0x71710071,
- 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d,
- 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac,
- 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1,
- 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043,
- 0x15150015,0xadad00ad,0x77770077,0x80800080,
- 0x82820082,0xecec00ec,0x27270027,0xe5e500e5,
- 0x85850085,0x35350035,0x0c0c000c,0x41410041,
- 0xefef00ef,0x93930093,0x19190019,0x21210021,
- 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd,
- 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce,
- 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a,
- 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d,
- 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d,
- 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d,
- 0x12120012,0x20200020,0xb1b100b1,0x99990099,
- 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005,
- 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7,
- 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c,
- 0x0f0f000f,0x16160016,0x18180018,0x22220022,
- 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091,
- 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050,
- 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097,
- 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2,
- 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db,
- 0x03030003,0xdada00da,0x3f3f003f,0x94940094,
- 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033,
- 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2,
- 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b,
- 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e,
- 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e,
- 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059,
- 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba,
- 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa,
- 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a,
- 0x49490049,0x68680068,0x38380038,0xa4a400a4,
- 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1,
- 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e,
+ 0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0,
+ 0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae,
+ 0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5,
+ 0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092,
+ 0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f,
+ 0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b,
+ 0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d,
+ 0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c,
+ 0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0,
+ 0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084,
+ 0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076,
+ 0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004,
+ 0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011,
+ 0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2,
+ 0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a,
+ 0x24240024, 0xe8e800e8, 0x60600060, 0x69690069,
+ 0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062,
+ 0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064,
+ 0x10100010, 0x00000000, 0xa3a300a3, 0x75750075,
+ 0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd,
+ 0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090,
+ 0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf,
+ 0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6,
+ 0x81810081, 0x6f6f006f, 0x13130013, 0x63630063,
+ 0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc,
+ 0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4,
+ 0x78780078, 0x06060006, 0xe7e700e7, 0x71710071,
+ 0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d,
+ 0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac,
+ 0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1,
+ 0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043,
+ 0x15150015, 0xadad00ad, 0x77770077, 0x80800080,
+ 0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5,
+ 0x85850085, 0x35350035, 0x0c0c000c, 0x41410041,
+ 0xefef00ef, 0x93930093, 0x19190019, 0x21210021,
+ 0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd,
+ 0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce,
+ 0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a,
+ 0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d,
+ 0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d,
+ 0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d,
+ 0x12120012, 0x20200020, 0xb1b100b1, 0x99990099,
+ 0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005,
+ 0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7,
+ 0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c,
+ 0x0f0f000f, 0x16160016, 0x18180018, 0x22220022,
+ 0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091,
+ 0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050,
+ 0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097,
+ 0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2,
+ 0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db,
+ 0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094,
+ 0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033,
+ 0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2,
+ 0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b,
+ 0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e,
+ 0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e,
+ 0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059,
+ 0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba,
+ 0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa,
+ 0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a,
+ 0x49490049, 0x68680068, 0x38380038, 0xa4a400a4,
+ 0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1,
+ 0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e,
};
@@ -344,7 +344,7 @@ static const u32 camellia_sp4404[256] = {
lr = (lr << bits) + (rl >> (32 - bits)); \
rl = (rl << bits) + (rr >> (32 - bits)); \
rr = (rr << bits) + (w0 >> (32 - bits)); \
- } while(0)
+ } while (0)
#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \
do { \
@@ -354,7 +354,7 @@ static const u32 camellia_sp4404[256] = {
lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
- } while(0)
+ } while (0)
#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
do { \
@@ -373,7 +373,7 @@ static const u32 camellia_sp4404[256] = {
yl ^= yr; \
yr = ror32(yr, 8); \
yr ^= yl; \
- } while(0)
+ } while (0)
#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
@@ -835,7 +835,7 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
static void camellia_setup192(const unsigned char *key, u32 *subkey)
{
unsigned char kk[32];
- u32 krll, krlr, krrl,krrr;
+ u32 krll, krlr, krrl, krrr;
memcpy(kk, key, 24);
memcpy((unsigned char *)&krll, key+16, 4);
@@ -865,7 +865,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
t1 |= lr; \
ll ^= t1; \
rr ^= rol32(t3, 1); \
- } while(0)
+ } while (0)
#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
do { \
@@ -881,12 +881,12 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
ir ^= il ^ kr; \
yl ^= ir; \
yr ^= ror32(il, 8) ^ ir; \
- } while(0)
+ } while (0)
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
{
- u32 il,ir,t0,t1; /* temporary variables */
+ u32 il, ir, t0, t1; /* temporary variables */
/* pre whitening but absorb kw2 */
io[0] ^= SUBKEY_L(0);
@@ -894,30 +894,30 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
/* main iteration */
#define ROUNDS(i) do { \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
- io[0],io[1],il,ir); \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
- io[0],io[1],il,ir); \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
- io[0],io[1],il,ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
+ io[0], io[1], il, ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
+ io[0], io[1], il, ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
+ io[0], io[1], il, ir); \
} while (0)
#define FLS(i) do { \
- CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
- SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
- SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
- t0,t1,il,ir); \
+ CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
+ SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
+ SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
+ t0, t1, il, ir); \
} while (0)
ROUNDS(0);
@@ -941,7 +941,7 @@ static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
{
- u32 il,ir,t0,t1; /* temporary variables */
+ u32 il, ir, t0, t1; /* temporary variables */
/* pre whitening but absorb kw2 */
io[0] ^= SUBKEY_L(i);
@@ -949,30 +949,30 @@ static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
/* main iteration */
#define ROUNDS(i) do { \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 7),SUBKEY_R(i + 7), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 6),SUBKEY_R(i + 6), \
- io[0],io[1],il,ir); \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 5),SUBKEY_R(i + 5), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 4),SUBKEY_R(i + 4), \
- io[0],io[1],il,ir); \
- CAMELLIA_ROUNDSM(io[0],io[1], \
- SUBKEY_L(i + 3),SUBKEY_R(i + 3), \
- io[2],io[3],il,ir); \
- CAMELLIA_ROUNDSM(io[2],io[3], \
- SUBKEY_L(i + 2),SUBKEY_R(i + 2), \
- io[0],io[1],il,ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
+ io[0], io[1], il, ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
+ io[0], io[1], il, ir); \
+ CAMELLIA_ROUNDSM(io[0], io[1], \
+ SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
+ io[2], io[3], il, ir); \
+ CAMELLIA_ROUNDSM(io[2], io[3], \
+ SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
+ io[0], io[1], il, ir); \
} while (0)
#define FLS(i) do { \
- CAMELLIA_FLS(io[0],io[1],io[2],io[3], \
- SUBKEY_L(i + 1),SUBKEY_R(i + 1), \
- SUBKEY_L(i + 0),SUBKEY_R(i + 0), \
- t0,t1,il,ir); \
+ CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
+ SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
+ SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
+ t0, t1, il, ir); \
} while (0)
if (i == 32) {
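
Most of the camellia.c churn above is spacing, but the ROLDQ/ROLDQo32 macros
it touches implement a left rotation of a 128-bit value held in four 32-bit
words, which the key schedule uses repeatedly. A compact function-style sketch
of the sub-32-bit case, using OR where the macro uses addition (equivalent
here) and not taken verbatim from the file:

    #include <stdio.h>
    #include <stdint.h>

    /* Rotate the 128-bit value {w[0], w[1], w[2], w[3]} (w[0] most
     * significant) left by 0 < bits < 32, in the spirit of ROLDQ(). */
    static void rol128(uint32_t w[4], unsigned int bits)
    {
        uint32_t w0 = w[0];

        w[0] = (w[0] << bits) | (w[1] >> (32 - bits));
        w[1] = (w[1] << bits) | (w[2] >> (32 - bits));
        w[2] = (w[2] << bits) | (w[3] >> (32 - bits));
        w[3] = (w[3] << bits) | (w0   >> (32 - bits));
    }

    int main(void)
    {
        uint32_t w[4] = { 0x80000000, 0, 0, 1 };

        rol128(w, 1);   /* top bit wraps to the bottom: 0 0 0 3 */
        printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
        return 0;
    }
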
diff --git a/crypto/cast5.c b/crypto/cast5.c
index 8cbe28fa0e0c..a1d2294b50ad 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -569,12 +569,12 @@ static const u32 sb8[256] = {
0xeaee6801, 0x8db2a283, 0xea8bf59e
};
-#define F1(D,m,r) ( (I = ((m) + (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) )
-#define F2(D,m,r) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) )
-#define F3(D,m,r) ( (I = ((m) - (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
+#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
+#define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
+#define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -694,7 +694,7 @@ static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
dst[1] = cpu_to_be32(l);
}
-static void key_schedule(u32 * x, u32 * z, u32 * k)
+static void key_schedule(u32 *x, u32 *z, u32 *k)
{
#define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
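
The cast5 F1/F2/F3 macros reformatted above each mix a data half with a key
word, rotate by a key-dependent amount, and combine four S-box lookups with
^, - and +. Purely as a readability aid, F1 could be written as a function
along these lines (the S-boxes are passed in here; in cast5.c they are the
file-scope s1..s4 tables, and the dummy table in main() exists only to make
the sketch compile):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t rol32(uint32_t v, unsigned int r)
    {
        r &= 31;
        return r ? (v << r) | (v >> (32 - r)) : v;
    }

    /* Function form of the F1 macro: add, key-dependent rotate, then four
     * S-box lookups combined with ^, -, +. */
    static uint32_t cast5_f1(uint32_t D, uint32_t m, uint8_t r,
                             const uint32_t *s1, const uint32_t *s2,
                             const uint32_t *s3, const uint32_t *s4)
    {
        uint32_t I = rol32(m + D, r);

        return ((s1[I >> 24] ^ s2[(I >> 16) & 0xff]) -
                s3[(I >> 8) & 0xff]) + s4[I & 0xff];
    }

    int main(void)
    {
        static const uint32_t dummy[256] = { 0x12345678 };  /* placeholder */

        printf("%08x\n", cast5_f1(0x01234567, 0x89abcdef, 13,
                                  dummy, dummy, dummy, dummy));
        return 0;
    }
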
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 007d02beed67..e0c15a6c7c34 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -11,7 +11,7 @@
* under the terms of GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
@@ -35,12 +35,12 @@ struct cast6_ctx {
u8 Kr[12][4];
};
-#define F1(D,r,m) ( (I = ((m) + (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]) )
-#define F2(D,r,m) ( (I = ((m) ^ (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]) )
-#define F3(D,r,m) ( (I = ((m) - (D))), (I=rol32(I,(r))), \
- (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) )
+#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
+#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
+#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
+ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static const u32 s1[256] = {
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
@@ -312,7 +312,7 @@ static const u32 s4[256] = {
static const u32 Tm[24][8] = {
{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
- 0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
+ 0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
@@ -369,7 +369,8 @@ static const u8 Tr[4][8] = {
};
/* forward octave */
-static void W(u32 *key, unsigned int i) {
+static void W(u32 *key, unsigned int i)
+{
u32 I;
key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -377,7 +378,7 @@ static void W(u32 *key, unsigned int i) {
key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
- key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
+ key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
@@ -393,11 +394,11 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
if (key_len % 4 != 0) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
- }
+ }
+
+ memset(p_key, 0, 32);
+ memcpy(p_key, in_key, key_len);
- memset (p_key, 0, 32);
- memcpy (p_key, in_key, key_len);
-
key[0] = be32_to_cpu(p_key[0]); /* A */
key[1] = be32_to_cpu(p_key[1]); /* B */
key[2] = be32_to_cpu(p_key[2]); /* C */
@@ -406,18 +407,16 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
key[5] = be32_to_cpu(p_key[5]); /* F */
key[6] = be32_to_cpu(p_key[6]); /* G */
key[7] = be32_to_cpu(p_key[7]); /* H */
-
-
for (i = 0; i < 12; i++) {
- W (key, 2 * i);
- W (key, 2 * i + 1);
-
+ W(key, 2 * i);
+ W(key, 2 * i + 1);
+
c->Kr[i][0] = key[0] & 0x1f;
c->Kr[i][1] = key[2] & 0x1f;
c->Kr[i][2] = key[4] & 0x1f;
c->Kr[i][3] = key[6] & 0x1f;
-
+
c->Km[i][0] = key[7];
c->Km[i][1] = key[5];
c->Km[i][2] = key[3];
@@ -428,21 +427,23 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}
/*forward quad round*/
-static void Q (u32 * block, u8 * Kr, u32 * Km) {
+static void Q(u32 *block, u8 *Kr, u32 *Km)
+{
u32 I;
block[2] ^= F1(block[3], Kr[0], Km[0]);
block[1] ^= F2(block[2], Kr[1], Km[1]);
block[0] ^= F3(block[1], Kr[2], Km[2]);
- block[3] ^= F1(block[0], Kr[3], Km[3]);
+ block[3] ^= F1(block[0], Kr[3], Km[3]);
}
/*reverse quad round*/
-static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
+static void QBAR(u32 *block, u8 *Kr, u32 *Km)
+{
u32 I;
- block[3] ^= F1(block[0], Kr[3], Km[3]);
- block[0] ^= F3(block[1], Kr[2], Km[2]);
- block[1] ^= F2(block[2], Kr[1], Km[1]);
- block[2] ^= F1(block[3], Kr[0], Km[0]);
+ block[3] ^= F1(block[0], Kr[3], Km[3]);
+ block[0] ^= F3(block[1], Kr[2], Km[2]);
+ block[1] ^= F2(block[2], Kr[1], Km[1]);
+ block[2] ^= F1(block[3], Kr[0], Km[0]);
}
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
@@ -451,64 +452,65 @@ static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
- u32 * Km;
- u8 * Kr;
+ u32 *Km;
+ u8 *Kr;
block[0] = be32_to_cpu(src[0]);
block[1] = be32_to_cpu(src[1]);
block[2] = be32_to_cpu(src[2]);
block[3] = be32_to_cpu(src[3]);
- Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km);
- Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km);
- Km = c->Km[2]; Kr = c->Kr[2]; Q (block, Kr, Km);
- Km = c->Km[3]; Kr = c->Kr[3]; Q (block, Kr, Km);
- Km = c->Km[4]; Kr = c->Kr[4]; Q (block, Kr, Km);
- Km = c->Km[5]; Kr = c->Kr[5]; Q (block, Kr, Km);
- Km = c->Km[6]; Kr = c->Kr[6]; QBAR (block, Kr, Km);
- Km = c->Km[7]; Kr = c->Kr[7]; QBAR (block, Kr, Km);
- Km = c->Km[8]; Kr = c->Kr[8]; QBAR (block, Kr, Km);
- Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km);
- Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km);
- Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km);
+ Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
+ Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
+ Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
+ Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
+ Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
+ Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
+ Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
+ Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
+ Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
+ Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
+ Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
+ Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
dst[0] = cpu_to_be32(block[0]);
dst[1] = cpu_to_be32(block[1]);
dst[2] = cpu_to_be32(block[2]);
dst[3] = cpu_to_be32(block[3]);
-}
+}
-static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) {
- struct cast6_ctx * c = crypto_tfm_ctx(tfm);
+static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
+{
+ struct cast6_ctx *c = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)inbuf;
__be32 *dst = (__be32 *)outbuf;
u32 block[4];
- u32 * Km;
- u8 * Kr;
+ u32 *Km;
+ u8 *Kr;
block[0] = be32_to_cpu(src[0]);
block[1] = be32_to_cpu(src[1]);
block[2] = be32_to_cpu(src[2]);
block[3] = be32_to_cpu(src[3]);
- Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km);
- Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km);
- Km = c->Km[9]; Kr = c->Kr[9]; Q (block, Kr, Km);
- Km = c->Km[8]; Kr = c->Kr[8]; Q (block, Kr, Km);
- Km = c->Km[7]; Kr = c->Kr[7]; Q (block, Kr, Km);
- Km = c->Km[6]; Kr = c->Kr[6]; Q (block, Kr, Km);
- Km = c->Km[5]; Kr = c->Kr[5]; QBAR (block, Kr, Km);
- Km = c->Km[4]; Kr = c->Kr[4]; QBAR (block, Kr, Km);
- Km = c->Km[3]; Kr = c->Kr[3]; QBAR (block, Kr, Km);
- Km = c->Km[2]; Kr = c->Kr[2]; QBAR (block, Kr, Km);
- Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km);
- Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km);
-
+ Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
+ Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
+ Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
+ Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
+ Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
+ Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
+ Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
+ Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
+ Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
+ Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
+ Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
+ Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
+
dst[0] = cpu_to_be32(block[0]);
dst[1] = cpu_to_be32(block[1]);
dst[2] = cpu_to_be32(block[2]);
dst[3] = cpu_to_be32(block[3]);
-}
+}
static struct crypto_alg alg = {
.cra_name = "cast6",
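
Side note on the cast6.c hunks above: the twelve unrolled calls in cast6_encrypt()/cast6_decrypt() follow a fixed schedule, six forward quad rounds (Q) followed by six reverse quad rounds (QBAR), with decryption walking the Km/Kr key schedule backwards. A minimal sketch of that same schedule written as a loop, purely illustrative and not part of this patch; Q(), QBAR() and struct cast6_ctx are the definitions shown above:

/*
 * Hypothetical loop form of the unrolled quad rounds above: six forward
 * rounds, then six reverse rounds, with the key schedule reversed for
 * decryption.
 */
static void cast6_rounds(struct cast6_ctx *c, u32 block[4], bool decrypt)
{
	int i;

	for (i = 0; i < 12; i++) {
		int r = decrypt ? 11 - i : i;

		if (i < 6)
			Q(block, c->Kr[r], c->Km[r]);
		else
			QBAR(block, c->Kr[r], c->Km[r]);
	}
}
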
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 9a1a7316eeac..39541e0e537d 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -8,7 +8,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
diff --git a/crypto/compress.c b/crypto/compress.c
index 1ee357085d3a..c33f0763a956 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -7,7 +7,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -39,7 +39,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress;
-
+
return 0;
}
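
For context, the cot_compress/cot_decompress ops installed by crypto_init_compress_ops() above are reached by in-kernel users through the crypto_comp helpers. A minimal sketch of that calling convention, assuming the "deflate" algorithm from this directory is available; the function and its callers are hypothetical:

#include <linux/crypto.h>
#include <linux/err.h>

/* Compress a linear buffer with the legacy crypto_comp interface. */
static int example_deflate_compress(const u8 *src, unsigned int slen,
				    u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* *dlen is the capacity of dst on entry, the output size on exit. */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}
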
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 973bc2cfab2e..de9e55c29794 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -1,4 +1,4 @@
-/*
+/*
* Cryptographic API.
*
* CRC32C chksum
@@ -30,7 +30,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -142,7 +142,7 @@ static u32 crc32c(u32 crc, const u8 *data, unsigned int length)
}
/*
- * Steps through buffer one byte at at time, calculates reflected
+ * Steps through buffer one byte at a time, calculates reflected
* crc using table.
*/
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ae5fa99d5d36..e46d21ae26bc 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -3,6 +3,13 @@
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
+ * Added AEAD support to cryptd.
+ * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -12,6 +19,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
@@ -31,7 +39,7 @@ struct cryptd_cpu_queue {
};
struct cryptd_queue {
- struct cryptd_cpu_queue *cpu_queue;
+ struct cryptd_cpu_queue __percpu *cpu_queue;
};
struct cryptd_instance_ctx {
@@ -39,6 +47,16 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue;
};
+struct hashd_instance_ctx {
+ struct crypto_shash_spawn spawn;
+ struct cryptd_queue *queue;
+};
+
+struct aead_instance_ctx {
+ struct crypto_aead_spawn aead_spawn;
+ struct cryptd_queue *queue;
+};
+
struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child;
};
@@ -48,11 +66,20 @@ struct cryptd_blkcipher_request_ctx {
};
struct cryptd_hash_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ struct shash_desc desc;
+};
+
+struct cryptd_aead_ctx {
+ struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+ crypto_completion_t complete;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -93,7 +120,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct cryptd_cpu_queue *cpu_queue;
cpu = get_cpu();
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
put_cpu();
@@ -249,32 +276,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
crypto_free_blkcipher(ctx->child);
}
-static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
- struct cryptd_queue *queue)
+static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail)
{
+ char *p;
struct crypto_instance *inst;
- struct cryptd_instance_ctx *ctx;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst) {
- inst = ERR_PTR(-ENOMEM);
- goto out;
- }
+ p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ inst = (void *)(p + head);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
- ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- ctx->queue = queue;
-
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
@@ -282,29 +301,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
- return inst;
+ return p;
out_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
+ kfree(p);
+ p = ERR_PTR(err);
goto out;
}
-static struct crypto_instance *cryptd_alloc_blkcipher(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_create_blkcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
{
+ struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
- inst = cryptd_alloc_instance(alg, queue);
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+
inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type;
@@ -323,26 +354,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
+ struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *cipher;
+ struct crypto_shash *hash;
- cipher = crypto_spawn_hash(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->child = cipher;
- tfm->crt_ahash.reqsize =
- sizeof(struct cryptd_hash_request_ctx);
+ ctx->child = hash;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct cryptd_hash_request_ctx) +
+ crypto_shash_descsize(hash));
return 0;
}
@@ -350,22 +389,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->child);
+ crypto_free_shash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_hash *child = ctx->child;
+ struct crypto_shash *child = ctx->child;
int err;
- crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
@@ -385,21 +424,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->init(&desc);
+ err = crypto_shash_init(desc);
req->base.complete = rctx->complete;
@@ -416,23 +453,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
+ struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->update(&desc,
- req->src,
- req->nbytes);
+ err = shash_ahash_update(req, &rctx->desc);
req->base.complete = rctx->complete;
@@ -449,21 +478,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->final(&desc, req->result);
+ err = crypto_shash_final(&rctx->desc, req->result);
req->base.complete = rctx->complete;
@@ -478,26 +499,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- rctx = ahash_request_ctx(req);
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_finup(req, &rctx->desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_finup);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->digest(&desc,
- req->src,
- req->nbytes,
- req->result);
+ err = shash_ahash_digest(req, desc);
req->base.complete = rctx->complete;
@@ -512,72 +551,261 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
-static struct crypto_instance *cryptd_alloc_hash(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_hash_export(struct ahash_request *req, void *out)
{
- struct crypto_instance *inst;
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_export(&rctx->desc, out);
+}
+
+static int cryptd_hash_import(struct ahash_request *req, const void *in)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_import(&rctx->desc, in);
+}
+
+static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct hashd_instance_ctx *ctx;
+ struct ahash_instance *inst;
+ struct shash_alg *salg;
struct crypto_alg *alg;
+ int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
- inst = cryptd_alloc_instance(alg, queue);
+ alg = &salg->base;
+ inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
+ sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
- inst->alg.cra_type = &crypto_ahash_type;
+ ctx = ahash_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ ahash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
+ inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+ inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+ inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+
+ inst->alg.init = cryptd_hash_init_enqueue;
+ inst->alg.update = cryptd_hash_update_enqueue;
+ inst->alg.final = cryptd_hash_final_enqueue;
+ inst->alg.finup = cryptd_hash_finup_enqueue;
+ inst->alg.export = cryptd_hash_export;
+ inst->alg.import = cryptd_hash_import;
+ inst->alg.setkey = cryptd_hash_setkey;
+ inst->alg.digest = cryptd_hash_digest_enqueue;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static void cryptd_aead_crypt(struct aead_request *req,
+ struct crypto_aead *child,
+ int err,
+ int (*crypt)(struct aead_request *req))
+{
+ struct cryptd_aead_request_ctx *rctx;
+ rctx = aead_request_ctx(req);
+
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+ aead_request_set_tfm(req, child);
+ err = crypt(req);
+ req->base.complete = rctx->complete;
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+ struct crypto_aead *child = ctx->child;
+ struct aead_request *req;
+
+ req = container_of(areq, struct aead_request, base);
+ cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+ struct crypto_aead *child = ctx->child;
+ struct aead_request *req;
+
+ req = container_of(areq, struct aead_request, base);
+ cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+ crypto_completion_t complete)
+{
+ struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+ rctx->complete = req->base.complete;
+ req->base.complete = complete;
+ return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+ return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+ return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *cipher;
+
+ cipher = crypto_spawn_aead(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+ ctx->child = cipher;
+ tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+ return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct aead_instance_ctx *ctx;
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
- inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
- inst->alg.cra_init = cryptd_hash_init_tfm;
- inst->alg.cra_exit = cryptd_hash_exit_tfm;
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
- inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
- inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
- inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
- inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
- inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+ err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ inst->alg.cra_type = alg->cra_type;
+ inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+ inst->alg.cra_init = cryptd_aead_init_tfm;
+ inst->alg.cra_exit = cryptd_aead_exit_tfm;
+ inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
+ inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+ inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+ inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+ inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+ inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
+ inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
+ inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
+ inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
+
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+ kfree(inst);
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct cryptd_queue queue;
-static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
+static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_alloc_blkcipher(tb, &queue);
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_alloc_hash(tb, &queue);
+ return cryptd_create_hash(tmpl, tb, &queue);
+ case CRYPTO_ALG_TYPE_AEAD:
+ return cryptd_create_aead(tmpl, tb, &queue);
}
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+ struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+ struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
+ switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_drop_shash(&hctx->spawn);
+ kfree(ahash_instance(inst));
+ return;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+ kfree(inst);
+ return;
+ default:
+ crypto_drop_spawn(&ctx->spawn);
+ kfree(inst);
+ }
}
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
- .alloc = cryptd_alloc,
+ .create = cryptd_create,
.free = cryptd_free,
.module = THIS_MODULE,
};
@@ -620,6 +848,82 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ahash *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ahash(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return __cryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
+
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_child);
+
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ return &rctx->desc;
+}
+EXPORT_SYMBOL_GPL(cryptd_shash_desc);
+
+void cryptd_free_ahash(struct cryptd_ahash *tfm)
+{
+ crypto_free_ahash(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ahash);
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_aead *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_aead(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+ return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+ struct cryptd_aead_ctx *ctx;
+ ctx = crypto_aead_ctx(&tfm->base);
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
+
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
+
static int __init cryptd_init(void)
{
int err;
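
The exported helpers added above (cryptd_alloc_ahash(), cryptd_ahash_child(), cryptd_shash_desc(), cryptd_free_ahash(), plus the cryptd_alloc_aead()/cryptd_aead_child()/cryptd_free_aead() counterparts) let a driver wrap a synchronous algorithm in a workqueue-backed asynchronous instance. A minimal caller sketch, assuming a hypothetical module that digests a scatterlist through cryptd(sha1); the wait-for-completion pattern mirrors the one the GCM code uses for its asynchronous setkey:

#include <crypto/cryptd.h>
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_wait {
	struct completion completion;
	int err;
};

static void example_done(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->completion);
}

/* Digest a scatterlist through the asynchronous cryptd(sha1) instance. */
static int example_cryptd_digest(struct scatterlist *sg, unsigned int len,
				 u8 *out)
{
	struct cryptd_ahash *ctfm;
	struct ahash_request *req;
	struct example_wait wait;
	int err;

	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	req = ahash_request_alloc(&ctfm->base, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&wait.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* cryptd queues the request; wait for the workqueue to finish it. */
	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.completion);
		err = wait.err;
	}

	ahash_request_free(req);
out_free_tfm:
	cryptd_free_ahash(ctfm);
	return err;
}
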
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index cb71c9122bc0..07a8a96d46fc 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -1,11 +1,11 @@
-/*
+/*
* Cryptographic API.
*
* Null algorithms, aka Much Ado About Nothing.
*
* These are needed for IPsec, and may be useful in general for
* testing & debugging.
- *
+ *
* The null cipher is compliant with RFC2410.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
@@ -163,7 +163,7 @@ MODULE_ALIAS("cipher_null");
static int __init crypto_null_mod_init(void)
{
int ret = 0;
-
+
ret = crypto_register_alg(&cipher_null);
if (ret < 0)
goto out;
@@ -180,7 +180,7 @@ static int __init crypto_null_mod_init(void)
if (ret < 0)
goto out_unregister_digest;
-out:
+out:
return ret;
out_unregister_digest:
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2d7425f0e7b8..4ca7222cfeb6 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -185,7 +185,7 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
/* Block size must be >= 4 bytes. */
err = -EINVAL;
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
+ inst->alg.cra_blkcipher.geniv = "chainiv";
+
out:
crypto_mod_put(alg);
return inst;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 9128da44e953..463dc859aa05 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -1,14 +1,14 @@
-/*
+/*
* Cryptographic API.
*
* Deflate algorithm (RFC 1951), implemented here primarily for use
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* FIXME: deflate transforms will require up to a total of about 436k of kernel
@@ -49,7 +49,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = vmalloc(zlib_deflate_workspacesize());
- if (!stream->workspace ) {
+ if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
@@ -61,7 +61,7 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
ret = -EINVAL;
goto out_free;
}
-out:
+out:
return ret;
out_free:
vfree(stream->workspace);
@@ -74,7 +74,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
struct z_stream_s *stream = &ctx->decomp_stream;
stream->workspace = kzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
- if (!stream->workspace ) {
+ if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
@@ -106,7 +106,7 @@ static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
int ret;
-
+
ret = deflate_comp_init(ctx);
if (ret)
goto out;
@@ -153,11 +153,11 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
out:
return ret;
}
-
+
static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
-
+
int ret = 0;
struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
struct z_stream_s *stream = &dctx->decomp_stream;
@@ -182,7 +182,7 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
u8 zerostuff = 0;
stream->next_in = &zerostuff;
- stream->avail_in = 1;
+ stream->avail_in = 1;
ret = zlib_inflate(stream, Z_FINISH);
}
if (ret != Z_STREAM_END) {
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 5bd3ee345a64..873818d48e86 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -614,7 +614,7 @@ static const u32 S8[64] = {
#define T3(x) pt[2 * (x) + 2]
#define T4(x) pt[2 * (x) + 3]
-#define PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
+#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
/*
* Encryption key expansion
@@ -639,22 +639,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k)
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
- pe[15 * 2 + 0] = PC2(a, b, c, d); d = rs[d];
- pe[14 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[13 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[12 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[11 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[10 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 9 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 8 * 2 + 0] = PC2(d, a, b, c); c = rs[c];
- pe[ 7 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 6 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 5 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 4 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 3 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 2 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 1 * 2 + 0] = PC2(c, d, a, b); b = rs[b];
- pe[ 0 * 2 + 0] = PC2(b, c, d, a);
+ pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
/* Check if first half is weak */
w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
@@ -670,22 +670,22 @@ unsigned long des_ekey(u32 *pe, const u8 *k)
/* Check if second half is weak */
w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
- pe[15 * 2 + 1] = PC2(a, b, c, d); d = rs[d];
- pe[14 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[13 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[12 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[11 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[10 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 9 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 8 * 2 + 1] = PC2(d, a, b, c); c = rs[c];
- pe[ 7 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 6 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 5 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 4 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 3 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 2 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[ 1 * 2 + 1] = PC2(c, d, a, b); b = rs[b];
- pe[ 0 * 2 + 1] = PC2(b, c, d, a);
+ pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
for (d = 0; d < 16; ++d) {
@@ -722,22 +722,22 @@ static void dkey(u32 *pe, const u8 *k)
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
- pe[ 0 * 2] = PC2(a, b, c, d); d = rs[d];
- pe[ 1 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 2 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 3 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 4 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 5 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 6 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 7 * 2] = PC2(d, a, b, c); c = rs[c];
- pe[ 8 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 9 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[10 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[11 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[12 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[13 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[14 * 2] = PC2(c, d, a, b); b = rs[b];
- pe[15 * 2] = PC2(b, c, d, a);
+ pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2] = DES_PC2(b, c, d, a);
/* Skip to next table set */
pt += 512;
@@ -747,22 +747,22 @@ static void dkey(u32 *pe, const u8 *k)
b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
- pe[ 0 * 2 + 1] = PC2(a, b, c, d); d = rs[d];
- pe[ 1 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 2 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 3 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 4 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 5 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b];
- pe[ 6 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d];
- pe[ 7 * 2 + 1] = PC2(d, a, b, c); c = rs[c];
- pe[ 8 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[ 9 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[10 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[11 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[12 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a];
- pe[13 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c];
- pe[14 * 2 + 1] = PC2(c, d, a, b); b = rs[b];
- pe[15 * 2 + 1] = PC2(b, c, d, a);
+ pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
for (d = 0; d < 16; ++d) {
@@ -869,8 +869,7 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY))
- {
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
*flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
diff --git a/crypto/digest.c b/crypto/digest.c
deleted file mode 100644
index 5d3f1303da98..000000000000
--- a/crypto/digest.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Digest operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-
-#include "internal.h"
-
-static int init(struct hash_desc *desc)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
-
- tfm->__crt_alg->cra_digest.dia_init(tfm);
- return 0;
-}
-
-static int update2(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
-
- if (!nbytes)
- return 0;
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int l = sg->length;
-
- if (unlikely(l > nbytes))
- l = nbytes;
- nbytes -= l;
-
- do {
- unsigned int bytes_from_page = min(l, ((unsigned int)
- (PAGE_SIZE)) -
- offset);
- char *src = crypto_kmap(pg, 0);
- char *p = src + offset;
-
- if (unlikely(offset & alignmask)) {
- unsigned int bytes =
- alignmask + 1 - (offset & alignmask);
- bytes = min(bytes, bytes_from_page);
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes);
- p += bytes;
- bytes_from_page -= bytes;
- l -= bytes;
- }
- tfm->__crt_alg->cra_digest.dia_update(tfm, p,
- bytes_from_page);
- crypto_kunmap(src, 0);
- crypto_yield(desc->flags);
- offset = 0;
- pg++;
- l -= bytes_from_page;
- } while (l > 0);
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
- }
-
- return 0;
-}
-
-static int update(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return update2(desc, sg, nbytes);
-}
-
-static int final(struct hash_desc *desc, u8 *out)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
-
- if (unlikely((unsigned long)out & alignmask)) {
- unsigned long align = alignmask + 1;
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
- u8 *dst = (u8 *)ALIGN(addr, align) +
- ALIGN(tfm->__crt_alg->cra_ctxsize, align);
-
- digest->dia_final(tfm, dst);
- memcpy(out, dst, digest->dia_digestsize);
- } else
- digest->dia_final(tfm, out);
-
- return 0;
-}
-
-static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
-{
- crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(hash);
-
- crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
- return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
-}
-
-static int digest(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
-
- init(desc);
- update2(desc, sg, nbytes);
- return final(desc, out);
-}
-
-int crypto_init_digest_ops(struct crypto_tfm *tfm)
-{
- struct hash_tfm *ops = &tfm->crt_hash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- ops->init = init;
- ops->update = update;
- ops->final = final;
- ops->digest = digest;
- ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
- ops->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
-
-void crypto_exit_digest_ops(struct crypto_tfm *tfm)
-{
-}
-
-static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return -ENOSYS;
-}
-
-static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
- return dalg->dia_setkey(tfm, key, keylen);
-}
-
-static int digest_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- dalg->dia_init(tfm);
- return 0;
-}
-
-static int digest_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- update(&desc, req->src, req->nbytes);
- return 0;
-}
-
-static int digest_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- final(&desc, req->result);
- return 0;
-}
-
-static int digest_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return digest(&desc, req->src, req->nbytes, req->result);
-}
-
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
-
- if (dalg->dia_digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->init = digest_async_init;
- crt->update = digest_async_update;
- crt->final = digest_async_final;
- crt->digest = digest_async_digest;
- crt->setkey = dalg->dia_setkey ? digest_async_setkey :
- digest_async_nosetkey;
- crt->digestsize = dalg->dia_digestsize;
-
- return 0;
-}
diff --git a/crypto/ecb.c b/crypto/ecb.c
index a46838e98a71..935cfef4aa84 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -55,7 +55,7 @@ static int crypto_ecb_crypt(struct blkcipher_desc *desc,
do {
fn(crypto_cipher_tfm(tfm), wdst, wsrc);
-
+
wsrc += bsize;
wdst += bsize;
} while ((nbytes -= bsize) >= bsize);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index b82d61f4e26c..c33107e340b6 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -60,13 +60,13 @@ do { \
u32 t = lo & ((1 << n) - 1); \
lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
hi = (hi >> n) | (t << (24-n)); \
-} while(0)
+} while (0)
/* Rotate right one 64 bit number as a 56 bit number */
#define ror56_64(k, n) \
do { \
k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
-} while(0)
+} while (0)
/*
* Sboxes for Feistel network derived from
@@ -228,7 +228,7 @@ do { \
union lc4 { __be32 l; u8 c[4]; } u; \
u.l = sched ^ R; \
L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
-} while(0)
+} while (0)
/*
* encryptor
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e70afd0c73dd..2f5fbba6576c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -11,7 +11,10 @@
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include "internal.h"
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -21,11 +24,12 @@
struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr;
+ struct crypto_ahash_spawn ghash;
};
struct crypto_gcm_ctx {
struct crypto_ablkcipher *ctr;
- struct gf128mul_4k *gf128;
+ struct crypto_ahash *ghash;
};
struct crypto_rfc4106_ctx {
@@ -33,11 +37,23 @@ struct crypto_rfc4106_ctx {
u8 nonce[4];
};
+struct crypto_rfc4543_ctx {
+ struct crypto_aead *child;
+ u8 nonce[4];
+};
+
+struct crypto_rfc4543_req_ctx {
+ u8 auth_tag[16];
+ struct scatterlist cipher[1];
+ struct scatterlist payload[2];
+ struct scatterlist assoc[2];
+ struct aead_request subreq;
+};
+
struct crypto_gcm_ghash_ctx {
- u32 bytes;
- u32 flags;
- struct gf128mul_4k *gf128;
- u8 buffer[16];
+ unsigned int cryptlen;
+ struct scatterlist *src;
+ void (*complete)(struct aead_request *req, int err);
};
struct crypto_gcm_req_priv_ctx {
@@ -45,8 +61,11 @@ struct crypto_gcm_req_priv_ctx {
u8 iauth_tag[16];
struct scatterlist src[2];
struct scatterlist dst[2];
- struct crypto_gcm_ghash_ctx ghash;
- struct ablkcipher_request abreq;
+ struct crypto_gcm_ghash_ctx ghash_ctx;
+ union {
+ struct ahash_request ahreq;
+ struct ablkcipher_request abreq;
+ } u;
};
struct crypto_gcm_setkey_result {
@@ -54,6 +73,8 @@ struct crypto_gcm_setkey_result {
struct completion completion;
};
+static void *gcm_zeroes;
+
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -62,113 +83,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
- struct gf128mul_4k *gf128)
-{
- ctx->bytes = 0;
- ctx->flags = flags;
- ctx->gf128 = gf128;
- memset(ctx->buffer, 0, 16);
-}
-
-static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
- const u8 *src, unsigned int srclen)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- int n = min(srclen, ctx->bytes);
- u8 *pos = dst + (16 - ctx->bytes);
-
- ctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!ctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= 16) {
- crypto_xor(dst, src, 16);
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- src += 16;
- srclen -= 16;
- }
-
- if (srclen) {
- ctx->bytes = 16 - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
-}
-
-static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
- struct scatterlist *sg, int len)
-{
- struct scatter_walk walk;
- u8 *src;
- int n;
-
- if (!len)
- return;
-
- scatterwalk_start(&walk, sg);
-
- while (len) {
- n = scatterwalk_clamp(&walk, len);
-
- if (!n) {
- scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
-
- src = scatterwalk_map(&walk, 0);
-
- crypto_gcm_ghash_update(ctx, src, n);
- len -= n;
-
- scatterwalk_unmap(src, 0);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, 0, len);
- if (len)
- crypto_yield(ctx->flags);
- }
-}
-
-static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- u8 *tmp = dst + (16 - ctx->bytes);
-
- while (ctx->bytes--)
- *tmp++ ^= 0;
-
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- ctx->bytes = 0;
-}
-
-static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
- unsigned int authlen,
- unsigned int cryptlen, u8 *dst)
-{
- u8 *buf = ctx->buffer;
- u128 lengths;
-
- lengths.a = cpu_to_be64(authlen * 8);
- lengths.b = cpu_to_be64(cryptlen * 8);
-
- crypto_gcm_ghash_flush(ctx);
- crypto_xor(buf, (u8 *)&lengths, 16);
- gf128mul_4k_lle((be128 *)buf, ctx->gf128);
- crypto_xor(dst, buf, 16);
-}
-
static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{
struct crypto_gcm_setkey_result *result = req->data;
@@ -184,6 +98,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_ahash *ghash = ctx->ghash;
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
@@ -233,13 +148,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
- ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
-
- if (ctx->gf128 == NULL)
- err = -ENOMEM;
+ crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
+ crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
+ CRYPTO_TFM_RES_MASK);
out:
kfree(data);
@@ -272,8 +186,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u32 flags = req->base.tfm->crt_flags;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
struct scatterlist *dst;
__be32 counter = cpu_to_be32(1);
@@ -296,35 +208,329 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
req->iv);
+}
- crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
+static inline unsigned int gcm_remain(unsigned int len)
+{
+ len &= 0xfU;
+ return len ? 16 - len : 0;
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
+
+static int gcm_hash_update(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ crypto_completion_t complete,
+ struct scatterlist *src,
+ unsigned int len)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
- crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
- crypto_gcm_ghash_flush(ghash);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ ahash_request_set_crypt(ahreq, src, NULL, len);
+
+ return crypto_ahash_update(ahreq);
}
-static int crypto_gcm_hash(struct aead_request *req)
+static int gcm_hash_remain(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ unsigned int remain,
+ crypto_completion_t complete)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ sg_init_one(pctx->src, gcm_zeroes, remain);
+ ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
+
+ return crypto_ahash_update(ahreq);
+}
+
+static int gcm_hash_len(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ u128 lengths;
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(gctx->cryptlen * 8);
+ memcpy(pctx->iauth_tag, &lengths, 16);
+ sg_init_one(pctx->src, pctx->iauth_tag, 16);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_len_done, req);
+ ahash_request_set_crypt(ahreq, pctx->src,
+ NULL, sizeof(lengths));
+
+ return crypto_ahash_update(ahreq);
+}
+
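+/* Read out the GHASH digest into iauth_tag; the completion path XORs it
+ * into auth_tag. */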
+static int gcm_hash_final(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_final_done, req);
+ ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
+
+ return crypto_ahash_final(ahreq);
+}
+
+static void __gcm_hash_final_done(struct aead_request *req, int err)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u8 *auth_tag = pctx->auth_tag;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err)
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+
+ gctx->complete(req, err);
+}
- crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
- auth_tag);
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_final(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ __gcm_hash_final_done(req, err);
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_len(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ __gcm_hash_len_done(req, err);
+}
+
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(gctx->cryptlen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ __gcm_hash_crypt_remain_done(req, err);
+}
+
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && gctx->cryptlen) {
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done :
+ gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ gctx->src, gctx->cryptlen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ __gcm_hash_crypt_done(req, err);
+ else
+ __gcm_hash_crypt_remain_done(req, err);
+}
+
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(req->assoclen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && req->assoclen) {
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done :
+ gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ req->assoc, req->assoclen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ __gcm_hash_assoc_done(req, err);
+ else
+ __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+
+ __gcm_hash_init_done(req, err);
+}
+
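+/*
+ * Run the whole GHASH pass: associated data, zero padding, ciphertext,
+ * zero padding, the length block and the final digest.  Any step may
+ * complete asynchronously, in which case the corresponding *_done
+ * callback resumes the sequence and eventually calls gctx->complete().
+ */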
+static int gcm_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int remain;
+ crypto_completion_t complete;
+ int err;
+
+ ahash_request_set_tfm(ahreq, ctx->ghash);
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_init_done, req);
+ err = crypto_ahash_init(ahreq);
+ if (err)
+ return err;
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err)
+ return err;
+ }
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err)
+ return err;
+ }
+ err = gcm_hash_len(req, pctx);
+ if (err)
+ return err;
+ err = gcm_hash_final(req, pctx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void gcm_enc_copy_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ u8 *auth_tag = pctx->auth_tag;
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
- return 0;
}
-static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
{
- struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_hash(req);
+ gcm_enc_copy_hash(req, pctx);
+
+ aead_request_complete(req, err);
+}
+
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ else if (!err) {
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+ gcm_enc_copy_hash(req, pctx);
+ }
+ }
aead_request_complete(req, err);
}
@@ -332,43 +538,73 @@ static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
int err;
crypto_gcm_init_crypt(abreq, req, req->cryptlen);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_encrypt_done, req);
+ gcm_encrypt_done, req);
+
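+	/* GHASH runs over the ciphertext, so point the hash at the
+	 * destination once encryption has completed. */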
+ gctx->src = req->dst;
+ gctx->cryptlen = req->cryptlen;
+ gctx->complete = gcm_enc_hash_done;
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
- return crypto_gcm_hash(req);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+ gcm_enc_copy_hash(req, pctx);
+
+ return 0;
}
-static int crypto_gcm_verify(struct aead_request *req)
+static int crypto_gcm_verify(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
-
- authsize = crypto_aead_authsize(aead);
+ crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
-static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_verify(req);
+ err = crypto_gcm_verify(req, pctx);
+
+ aead_request_complete(req, err);
+}
+
+static void gcm_dec_hash_done(struct aead_request *req, int err)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err) {
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
+ err = crypto_ablkcipher_decrypt(abreq);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ else if (!err)
+ err = crypto_gcm_verify(req, pctx);
+ }
aead_request_complete(req, err);
}
@@ -377,27 +613,32 @@ static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
- unsigned int cryptlen = req->cryptlen;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = req->cryptlen;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
- crypto_gcm_init_crypt(abreq, req, cryptlen);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_decrypt_done, req);
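+	/* On decryption GHASH runs over the received ciphertext first;
+	 * the payload is decrypted and the tag verified afterwards. */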
+ gctx->src = req->src;
+ gctx->cryptlen = cryptlen;
+ gctx->complete = gcm_dec_hash_done;
- crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, cryptlen);
err = crypto_ablkcipher_decrypt(abreq);
if (err)
return err;
- return crypto_gcm_verify(req);
+ return crypto_gcm_verify(req, pctx);
}
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
@@ -406,43 +647,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *ctr;
+ struct crypto_ahash *ghash;
unsigned long align;
int err;
+ ghash = crypto_spawn_ahash(&ictx->ghash);
+ if (IS_ERR(ghash))
+ return PTR_ERR(ghash);
+
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
- return err;
+ goto err_free_hash;
ctx->ctr = ctr;
- ctx->gf128 = NULL;
+ ctx->ghash = ghash;
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
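+	/* The CTR and GHASH sub-requests live in a union, so reserve
+	 * room for the larger of the two. */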
tfm->crt_aead.reqsize = align +
- sizeof(struct crypto_gcm_req_priv_ctx) +
- crypto_ablkcipher_reqsize(ctr);
+ offsetof(struct crypto_gcm_req_priv_ctx, u) +
+ max(sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(ctr),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ghash));
return 0;
+
+err_free_hash:
+ crypto_free_ahash(ghash);
+ return err;
}
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
+ crypto_free_ahash(ctx->ghash);
crypto_free_ablkcipher(ctx->ctr);
}
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
const char *full_name,
- const char *ctr_name)
+ const char *ctr_name,
+ const char *ghash_name)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_alg *ctr;
+ struct crypto_alg *ghash_alg;
+ struct ahash_alg *ghash_ahash_alg;
struct gcm_instance_ctx *ctx;
int err;
@@ -454,17 +708,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
+ ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ err = PTR_ERR(ghash_alg);
+ if (IS_ERR(ghash_alg))
+ return ERR_PTR(err);
+
+ err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ goto out_put_ghash;
ctx = crypto_instance_ctx(inst);
+ ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
+ err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
+ inst);
+ if (err)
+ goto err_free_inst;
+
crypto_set_skcipher_spawn(&ctx->ctr, inst);
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
- goto err_free_inst;
+ goto err_drop_ghash;
ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
@@ -479,7 +747,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "gcm_base(%s)", ctr->cra_driver_name) >=
+ "gcm_base(%s,%s)", ctr->cra_driver_name,
+ ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
@@ -502,12 +771,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
out:
+ crypto_mod_put(ghash_alg);
return inst;
out_put_ctr:
crypto_drop_skcipher(&ctx->ctr);
+err_drop_ghash:
+ crypto_drop_ahash(&ctx->ghash);
err_free_inst:
kfree(inst);
+out_put_ghash:
inst = ERR_PTR(err);
goto out;
}
@@ -532,7 +805,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
}
static void crypto_gcm_free(struct crypto_instance *inst)
@@ -540,6 +813,7 @@ static void crypto_gcm_free(struct crypto_instance *inst)
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr);
+ crypto_drop_ahash(&ctx->ghash);
kfree(inst);
}
@@ -554,6 +828,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
{
int err;
const char *ctr_name;
+ const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
@@ -561,11 +836,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
if (IS_ERR(ctr_name))
return ERR_PTR(err);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
- ctr_name) >= CRYPTO_MAX_ALG_NAME)
+ ghash_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(ghash_name);
+ if (IS_ERR(ghash_name))
+ return ERR_PTR(err);
+
+ if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
+ ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {
@@ -780,10 +1060,280 @@ static struct crypto_template crypto_rfc4106_tmpl = {
.module = THIS_MODULE,
};
+static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
+ struct aead_request *req)
+{
+ unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
+
+ return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
+}
+
+static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+ struct crypto_aead *child = ctx->child;
+ int err;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->nonce, key + keylen, 4);
+
+ crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(child, key, keylen);
+ crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+
+ return err;
+}
+
+static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+/* this is the same as crypto_authenc_chain */
+static void crypto_rfc4543_chain(struct scatterlist *head,
+ struct scatterlist *sg, int chain)
+{
+ if (chain) {
+ head->length += sg->length;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ if (sg)
+ scatterwalk_sg_chain(head, 2, sg);
+ else
+ sg_mark_end(head);
+}
+
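+/*
+ * RFC 4543 (GMAC) authenticates everything and encrypts nothing: the child
+ * GCM request operates only on the 16-byte auth tag, while the IV and the
+ * actual payload are chained into the associated data.
+ */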
+static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+ int enc)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+ struct aead_request *subreq = &rctx->subreq;
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *cipher = rctx->cipher;
+ struct scatterlist *payload = rctx->payload;
+ struct scatterlist *assoc = rctx->assoc;
+ unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int assoclen = req->assoclen;
+ struct page *dstp;
+ u8 *vdst;
+ u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
+ crypto_aead_alignmask(ctx->child) + 1);
+
+ memcpy(iv, ctx->nonce, 4);
+ memcpy(iv + 4, req->iv, 8);
+
+ /* construct cipher/plaintext */
+ if (enc)
+ memset(rctx->auth_tag, 0, authsize);
+ else
+ scatterwalk_map_and_copy(rctx->auth_tag, dst,
+ req->cryptlen - authsize,
+ authsize, 0);
+
+ sg_init_one(cipher, rctx->auth_tag, authsize);
+
+ /* construct the aad */
+ dstp = sg_page(dst);
+ vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+
+ sg_init_table(payload, 2);
+ sg_set_buf(payload, req->iv, 8);
+ crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+ assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
+
+ sg_init_table(assoc, 2);
+ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+ req->assoc->offset);
+ crypto_rfc4543_chain(assoc, payload, 0);
+
+ aead_request_set_tfm(subreq, ctx->child);
+ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
+ req->base.data);
+ aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
+ aead_request_set_assoc(subreq, assoc, assoclen);
+
+ return subreq;
+}
+
+static int crypto_rfc4543_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+ struct aead_request *subreq;
+ int err;
+
+ subreq = crypto_rfc4543_crypt(req, 1);
+ err = crypto_aead_encrypt(subreq);
+ if (err)
+ return err;
+
+ scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int crypto_rfc4543_decrypt(struct aead_request *req)
+{
+ req = crypto_rfc4543_crypt(req, 0);
+
+ return crypto_aead_decrypt(req);
+}
+
+static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *aead;
+ unsigned long align;
+
+ aead = crypto_spawn_aead(spawn);
+ if (IS_ERR(aead))
+ return PTR_ERR(aead);
+
+ ctx->child = aead;
+
+ align = crypto_aead_alignmask(aead);
+ align &= ~(crypto_tfm_ctx_alignment() - 1);
+ tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
+ ALIGN(crypto_aead_reqsize(aead),
+ crypto_tfm_ctx_alignment()) +
+ align + 16;
+
+ return 0;
+}
+
+static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_instance *inst;
+ struct crypto_aead_spawn *spawn;
+ struct crypto_alg *alg;
+ const char *ccm_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ return ERR_PTR(err);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ ccm_name = crypto_attr_alg_name(tb[1]);
+ err = PTR_ERR(ccm_name);
+ if (IS_ERR(ccm_name))
+ return ERR_PTR(err);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return ERR_PTR(-ENOMEM);
+
+ spawn = crypto_instance_ctx(inst);
+ crypto_set_aead_spawn(spawn, inst);
+ err = crypto_grab_aead(spawn, ccm_name, 0,
+ crypto_requires_sync(algt->type, algt->mask));
+ if (err)
+ goto out_free_inst;
+
+ alg = crypto_aead_spawn_alg(spawn);
+
+ err = -EINVAL;
+
+ /* We only support 16-byte blocks. */
+ if (alg->cra_aead.ivsize != 16)
+ goto out_drop_alg;
+
+ /* Not a stream cipher? */
+ if (alg->cra_blocksize != 1)
+ goto out_drop_alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4543(%s)", alg->cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+ inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.cra_priority = alg->cra_priority;
+ inst->alg.cra_blocksize = 1;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+ inst->alg.cra_type = &crypto_nivaead_type;
+
+ inst->alg.cra_aead.ivsize = 8;
+ inst->alg.cra_aead.maxauthsize = 16;
+
+ inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
+
+ inst->alg.cra_init = crypto_rfc4543_init_tfm;
+ inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
+
+ inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
+ inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
+ inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
+ inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
+
+ inst->alg.cra_aead.geniv = "seqiv";
+
+out:
+ return inst;
+
+out_drop_alg:
+ crypto_drop_aead(spawn);
+out_free_inst:
+ kfree(inst);
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static void crypto_rfc4543_free(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(inst);
+}
+
+static struct crypto_template crypto_rfc4543_tmpl = {
+ .name = "rfc4543",
+ .alloc = crypto_rfc4543_alloc,
+ .free = crypto_rfc4543_free,
+ .module = THIS_MODULE,
+};
+
static int __init crypto_gcm_module_init(void)
{
int err;
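+	/* Shared all-zero block used by gcm_hash_remain() to pad GHASH input. */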
+ gcm_zeroes = kzalloc(16, GFP_KERNEL);
+ if (!gcm_zeroes)
+ return -ENOMEM;
+
err = crypto_register_template(&crypto_gcm_base_tmpl);
if (err)
goto out;
@@ -796,18 +1346,27 @@ static int __init crypto_gcm_module_init(void)
if (err)
goto out_undo_gcm;
-out:
- return err;
+ err = crypto_register_template(&crypto_rfc4543_tmpl);
+ if (err)
+ goto out_undo_rfc4106;
+ return 0;
+
+out_undo_rfc4106:
+ crypto_unregister_template(&crypto_rfc4106_tmpl);
out_undo_gcm:
crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base:
crypto_unregister_template(&crypto_gcm_base_tmpl);
- goto out;
+out:
+ kfree(gcm_zeroes);
+ return err;
}
static void __exit crypto_gcm_module_exit(void)
{
+ kfree(gcm_zeroes);
+ crypto_unregister_template(&crypto_rfc4543_tmpl);
crypto_unregister_template(&crypto_rfc4106_tmpl);
crypto_unregister_template(&crypto_gcm_tmpl);
crypto_unregister_template(&crypto_gcm_base_tmpl);
@@ -821,3 +1380,4 @@ MODULE_DESCRIPTION("Galois/Counter Mode");
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
MODULE_ALIAS("gcm_base");
MODULE_ALIAS("rfc4106");
+MODULE_ALIAS("rfc4543");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
new file mode 100644
index 000000000000..be4425616931
--- /dev/null
+++ b/crypto/ghash-generic.c
@@ -0,0 +1,170 @@
+/*
+ * GHASH: digest algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * The algorithm implementation is copied from gcm.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx, 0, sizeof(*dctx));
+
+ return 0;
+}
+
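+/* The key is the GHASH subkey H; precompute the 4 KB multiplication table
+ * used by gf128mul_4k_lle(). */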
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+ ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+ if (!ctx->gf128)
+ return -ENOMEM;
+
+ return 0;
+}
+
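+/*
+ * XOR the input into the digest buffer and multiply by H after each full
+ * 16-byte block; a trailing partial block is buffered until more data or
+ * the final zero padding arrives.
+ */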
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ int n = min(srclen, dctx->bytes);
+ u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ dctx->bytes -= n;
+ srclen -= n;
+
+ while (n--)
+ *pos++ ^= *src++;
+
+ if (!dctx->bytes)
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor(dst, src, GHASH_BLOCK_SIZE);
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ src += GHASH_BLOCK_SIZE;
+ srclen -= GHASH_BLOCK_SIZE;
+ }
+
+ if (srclen) {
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ while (srclen--)
+ *dst++ ^= *src++;
+ }
+
+ return 0;
+}
+
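+/* Fold in any buffered partial block (implicitly zero padded) before the
+ * digest is read out. */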
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ while (dctx->bytes--)
+ *tmp++ ^= 0;
+
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *buf = dctx->buffer;
+
+ ghash_flush(ctx, dctx);
+ memcpy(dst, buf, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static void ghash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+ .cra_exit = ghash_exit_tfm,
+ },
+};
+
+static int __init ghash_mod_init(void)
+{
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
+MODULE_ALIAS("ghash");
diff --git a/crypto/hash.c b/crypto/hash.c
deleted file mode 100644
index cb86b19fd105..000000000000
--- a/crypto/hash.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Cryptographic Hash operations.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-
-#include "internal.h"
-
-static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
- int ret;
- u8 *buffer, *alignbuffer;
- unsigned long absize;
-
- absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
- if (!buffer)
- return -ENOMEM;
-
- alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- memcpy(alignbuffer, key, keylen);
- ret = alg->setkey(crt, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
- return ret;
-}
-
-static int hash_setkey(struct crypto_hash *crt, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_hash_tfm(crt);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- unsigned long alignmask = crypto_hash_alignmask(crt);
-
- if ((unsigned long)key & alignmask)
- return hash_setkey_unaligned(crt, key, keylen);
-
- return alg->setkey(crt, key, keylen);
-}
-
-static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
- struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- return alg->setkey(tfm_hash, key, keylen);
-}
-
-static int hash_async_init(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->init(&desc);
-}
-
-static int hash_async_update(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->update(&desc, req->src, req->nbytes);
-}
-
-static int hash_async_final(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->final(&desc, req->result);
-}
-
-static int hash_async_digest(struct ahash_request *req)
-{
- struct crypto_tfm *tfm = req->base.tfm;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
- struct hash_desc desc = {
- .tfm = __crypto_hash_cast(tfm),
- .flags = req->base.flags,
- };
-
- return alg->digest(&desc, req->src, req->nbytes, req->result);
-}
-
-static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
-{
- struct ahash_tfm *crt = &tfm->crt_ahash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = hash_async_init;
- crt->update = hash_async_update;
- crt->final = hash_async_final;
- crt->digest = hash_async_digest;
- crt->setkey = hash_async_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
-{
- struct hash_tfm *crt = &tfm->crt_hash;
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = hash_setkey;
- crt->digestsize = alg->digestsize;
-
- return 0;
-}
-
-static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
-
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_hash_ops_async(tfm);
- else
- return crypto_init_hash_ops_sync(tfm);
-}
-
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
- __attribute__ ((unused));
-static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
-{
- seq_printf(m, "type : hash\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
-}
-
-const struct crypto_type crypto_hash_type = {
- .ctxsize = crypto_hash_ctxsize,
- .init = crypto_init_hash_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_hash_show,
-#endif
-};
-EXPORT_SYMBOL_GPL(crypto_hash_type);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic cryptographic hash type");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 0ad39c374963..8d9544cf8169 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -23,11 +23,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
#include <linux/string.h>
struct hmac_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *hash;
};
static inline void *align_ptr(void *p, unsigned int align)
@@ -35,65 +34,45 @@ static inline void *align_ptr(void *p, unsigned int align)
return (void *)ALIGN((unsigned long)p, align);
}
-static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm)
+static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{
- return align_ptr(crypto_hash_ctx_aligned(tfm) +
- crypto_hash_blocksize(tfm) * 2 +
- crypto_hash_digestsize(tfm), sizeof(void *));
+ return align_ptr(crypto_shash_ctx_aligned(tfm) +
+ crypto_shash_statesize(tfm) * 2,
+ crypto_tfm_ctx_alignment());
}
-static int hmac_setkey(struct crypto_hash *parent,
+static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct crypto_hash *tfm = ctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *ipad = crypto_shash_ctx_aligned(parent);
+ char *opad = ipad + ss;
+ struct hmac_ctx *ctx = align_ptr(opad + ss,
+ crypto_tfm_ctx_alignment());
+ struct crypto_shash *hash = ctx->hash;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(hash)];
+ } desc;
unsigned int i;
+ desc.shash.tfm = hash;
+ desc.shash.flags = crypto_shash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
if (keylen > bs) {
- struct hash_desc desc;
- struct scatterlist tmp;
- int tmplen;
int err;
- desc.tfm = tfm;
- desc.flags = crypto_hash_get_flags(parent);
- desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_init(&desc);
+ err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
if (err)
return err;
- tmplen = bs * 2 + ds;
- sg_init_one(&tmp, ipad, tmplen);
-
- for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) {
- memcpy(ipad, inkey, tmplen);
- err = crypto_hash_update(&desc, &tmp, tmplen);
- if (err)
- return err;
- }
-
- if (keylen) {
- memcpy(ipad, inkey, keylen);
- err = crypto_hash_update(&desc, &tmp, keylen);
- if (err)
- return err;
- }
-
- err = crypto_hash_final(&desc, digest);
- if (err)
- return err;
-
- inkey = digest;
keylen = ds;
- }
+ } else
+ memcpy(ipad, inkey, keylen);
- memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs);
@@ -102,184 +81,178 @@ static int hmac_setkey(struct crypto_hash *parent,
opad[i] ^= 0x5c;
}
- return 0;
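+	/* Hash one block of ipad and one of opad and export the partial
+	 * states; init/final/finup later just import them instead of
+	 * rehashing the key material. */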
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, ipad, bs) ?:
+ crypto_shash_export(&desc.shash, ipad) ?:
+ crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, opad, bs) ?:
+ crypto_shash_export(&desc.shash, opad);
}
-static int hmac_init(struct hash_desc *pdesc)
+static int hmac_export(struct shash_desc *pdesc, void *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, ipad, bs);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_init(&desc);
- if (unlikely(err))
- return err;
-
- return crypto_hash_update(&desc, &tmp, bs);
+ return crypto_shash_export(desc, out);
}
-static int hmac_update(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes)
+static int hmac_import(struct shash_desc *pdesc, const void *in)
{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
- struct hash_desc desc;
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = ctx->hash;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_update(&desc, sg, nbytes);
+ return crypto_shash_import(desc, in);
}
-static int hmac_final(struct hash_desc *pdesc, u8 *out)
+static int hmac_init(struct shash_desc *pdesc)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *opad = crypto_hash_ctx_aligned(parent) + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+}
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, opad, bs + ds);
+static int hmac_update(struct shash_desc *pdesc,
+ const u8 *data, unsigned int nbytes)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- err = crypto_hash_final(&desc, digest);
- if (unlikely(err))
- return err;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_digest(&desc, &tmp, bs + ds, out);
+ return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
- unsigned int nbytes, u8 *out)
+static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist sg1[2];
- struct scatterlist sg2[1];
- int err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_table(sg1, 2);
- sg_set_buf(sg1, ipad, bs);
- scatterwalk_sg_chain(sg1, 2, sg);
+ return crypto_shash_final(desc, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
+}
- sg_init_table(sg2, 1);
- sg_set_buf(sg2, opad, bs + ds);
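+/* Finish the inner hash over the remaining data, then import the
+ * precomputed opad state and hash the inner digest to produce the MAC. */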
+static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
+ unsigned int nbytes, u8 *out)
+{
- err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
- if (unlikely(err))
- return err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- return crypto_hash_digest(&desc, sg2, bs + ds, out);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_finup(desc, data, nbytes, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
}
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_hash *hash;
+ struct crypto_shash *parent = __crypto_shash_cast(tfm);
+ struct crypto_shash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
+ struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
+ struct hmac_ctx *ctx = hmac_ctx(parent);
- hash = crypto_spawn_hash(spawn);
+ hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
- ctx->child = hash;
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
+
+ ctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
- crypto_free_hash(ctx->child);
+ struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
+ crypto_free_shash(ctx->hash);
}
-static void hmac_free(struct crypto_instance *inst)
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
-}
-
-static struct crypto_instance *hmac_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ struct shash_alg *salg;
int err;
int ds;
+ int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = ERR_PTR(-EINVAL);
- ds = alg->cra_type == &crypto_hash_type ?
- alg->cra_hash.digestsize :
- alg->cra_type ?
- __crypto_shash_alg(alg)->digestsize :
- alg->cra_digest.dia_digestsize;
- if (ds > alg->cra_blocksize)
+ return err;
+
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
+
+ err = -EINVAL;
+ ds = salg->digestsize;
+ ss = salg->statesize;
+ alg = &salg->base;
+ if (ds > alg->cra_blocksize ||
+ ss < alg->cra_blocksize)
goto out_put_alg;
- inst = crypto_alloc_instance("hmac", alg);
+ inst = shash_alloc_instance("hmac", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = ds;
-
- inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
- ALIGN(inst->alg.cra_blocksize * 2 + ds,
- sizeof(void *));
-
- inst->alg.cra_init = hmac_init_tfm;
- inst->alg.cra_exit = hmac_exit_tfm;
-
- inst->alg.cra_hash.init = hmac_init;
- inst->alg.cra_hash.update = hmac_update;
- inst->alg.cra_hash.final = hmac_final;
- inst->alg.cra_hash.digest = hmac_digest;
- inst->alg.cra_hash.setkey = hmac_setkey;
+ err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
+ shash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ ss = ALIGN(ss, alg->cra_alignmask + 1);
+ inst->alg.digestsize = ds;
+ inst->alg.statesize = ss;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
+ ALIGN(ss * 2, crypto_tfm_ctx_alignment());
+
+ inst->alg.base.cra_init = hmac_init_tfm;
+ inst->alg.base.cra_exit = hmac_exit_tfm;
+
+ inst->alg.init = hmac_init;
+ inst->alg.update = hmac_update;
+ inst->alg.final = hmac_final;
+ inst->alg.finup = hmac_finup;
+ inst->alg.export = hmac_export;
+ inst->alg.import = hmac_import;
+ inst->alg.setkey = hmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
- .alloc = hmac_alloc,
- .free = hmac_free,
+ .create = hmac_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};
diff --git a/crypto/internal.h b/crypto/internal.h
index 113579a82dff..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -25,12 +25,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
-
-#ifdef CONFIG_CRYPTO_FIPS
-extern int fips_enabled;
-#else
-#define fips_enabled 0
-#endif
+#include <linux/fips.h>
/* Crypto notification events. */
enum {
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void)
{ }
#endif
-static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
-{
- unsigned int len = alg->cra_ctxsize;
-
- if (alg->cra_alignmask) {
- len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- len += alg->cra_digest.dia_digestsize;
- }
-
- return len;
-}
-
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
-int crypto_init_digest_ops(struct crypto_tfm *tfm);
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
-void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask);
-int crypto_register_instance(struct crypto_template *tmpl,
- struct crypto_instance *inst);
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
diff --git a/crypto/md5.c b/crypto/md5.c
index 83eb52961750..30efc7dad891 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -16,17 +16,13 @@
*
*/
#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#define MD5_DIGEST_SIZE 16
-#define MD5_HMAC_BLOCK_SIZE 64
-#define MD5_BLOCK_WORDS 16
-#define MD5_HASH_WORDS 4
-
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
@@ -35,12 +31,6 @@
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-struct md5_ctx {
- u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
- u64 byte_count;
-};
-
static void md5_transform(u32 *hash, u32 const *in)
{
u32 a, b, c, d;
@@ -141,7 +131,7 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
}
}
-static inline void md5_transform_helper(struct md5_ctx *ctx)
+static inline void md5_transform_helper(struct md5_state *ctx)
{
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
md5_transform(ctx->hash, ctx->block);
@@ -149,7 +139,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
static int md5_init(struct shash_desc *desc)
{
- struct md5_ctx *mctx = shash_desc_ctx(desc);
+ struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
@@ -162,7 +152,7 @@ static int md5_init(struct shash_desc *desc)
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
- struct md5_ctx *mctx = shash_desc_ctx(desc);
+ struct md5_state *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
@@ -194,7 +184,7 @@ static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int md5_final(struct shash_desc *desc, u8 *out)
{
- struct md5_ctx *mctx = shash_desc_ctx(desc);
+ struct md5_state *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
@@ -220,12 +210,31 @@ static int md5_final(struct shash_desc *desc, u8 *out)
return 0;
}
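+/* Export/import copy the raw md5_state; statesize equals descsize, so the
+ * exported state is the complete hash context. */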
+static int md5_export(struct shash_desc *desc, void *out)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int md5_import(struct shash_desc *desc, const void *in)
+{
+ struct md5_state *ctx = shash_desc_ctx(desc);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
.final = md5_final,
- .descsize = sizeof(struct md5_ctx),
+ .export = md5_export,
+ .import = md5_import,
+ .descsize = sizeof(struct md5_state),
+ .statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index bcadc03726b7..f7c4a7d7412e 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
-static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
new file mode 100644
index 000000000000..75586f1f86e7
--- /dev/null
+++ b/crypto/pcrypt.c
@@ -0,0 +1,566 @@
+/*
+ * pcrypt - Parallel crypto wrapper.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/kobject.h>
+#include <linux/cpu.h>
+#include <crypto/pcrypt.h>
+
+struct padata_pcrypt {
+ struct padata_instance *pinst;
+ struct workqueue_struct *wq;
+
+ /*
+ * Cpumask for callback CPUs. It should be
+ * equal to the serial cpumask of the corresponding padata instance,
+ * so it is updated whenever padata notifies us of a serial
+ * cpumask change.
+ *
+ * cb_cpumask is protected by RCU. This prevents us from
+ * using cpumask_var_t directly because the actual type of
+ * cpumask_var_t depends on the kernel configuration (in particular
+ * on the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
+ * cpumask_var_t may be either a pointer to struct cpumask
+ * or a variable allocated on the stack. Thus we cannot safely use
+ * cpumask_var_t with RCU operations such as rcu_assign_pointer or
+ * rcu_dereference. So cpumask_var_t is wrapped in struct
+ * pcrypt_cpumask, which makes it possible to use it with RCU.
+ */
+ struct pcrypt_cpumask {
+ cpumask_var_t mask;
+ } *cb_cpumask;
+ struct notifier_block nblock;
+};
+
+static struct padata_pcrypt pencrypt;
+static struct padata_pcrypt pdecrypt;
+static struct kset *pcrypt_kset;
+
+struct pcrypt_instance_ctx {
+ struct crypto_spawn spawn;
+ unsigned int tfm_count;
+};
+
+struct pcrypt_aead_ctx {
+ struct crypto_aead *child;
+ unsigned int cb_cpu;
+};
+
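+/*
+ * Choose the callback CPU: keep the caller's CPU if it is in the callback
+ * cpumask, otherwise map it onto one of the CPUs in that mask, then hand
+ * the request to padata for parallel processing.
+ */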
+static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
+ struct padata_pcrypt *pcrypt)
+{
+ unsigned int cpu_index, cpu, i;
+ struct pcrypt_cpumask *cpumask;
+
+ cpu = *cb_cpu;
+
+ rcu_read_lock_bh();
+ cpumask = rcu_dereference(pcrypt->cb_cpumask);
+ if (cpumask_test_cpu(cpu, cpumask->mask))
+ goto out;
+
+ if (!cpumask_weight(cpumask->mask))
+ goto out;
+
+ cpu_index = cpu % cpumask_weight(cpumask->mask);
+
+ cpu = cpumask_first(cpumask->mask);
+ for (i = 0; i < cpu_index; i++)
+ cpu = cpumask_next(cpu, cpumask->mask);
+
+ *cb_cpu = cpu;
+
+out:
+ rcu_read_unlock_bh();
+ return padata_do_parallel(pcrypt->pinst, padata, cpu);
+}
+
+static int pcrypt_aead_setkey(struct crypto_aead *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+ return crypto_aead_setkey(ctx->child, key, keylen);
+}
+
+static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+ return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+static void pcrypt_aead_serial(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ aead_request_complete(req->base.data, padata->info);
+}
+
+static void pcrypt_aead_giv_serial(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+ aead_request_complete(req->areq.base.data, padata->info);
+}
+
+static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+
+ padata->info = err;
+ req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ padata_do_serial(padata);
+}
+
+static void pcrypt_aead_enc(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_encrypt(req);
+
+ if (padata->info == -EINPROGRESS)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_encrypt(struct aead_request *req)
+{
+ int err;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct aead_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(req);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_enc;
+ padata->serial = pcrypt_aead_serial;
+
+ aead_request_set_tfm(creq, ctx->child);
+ aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, req);
+ aead_request_set_crypt(creq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+ if (!err)
+ return -EINPROGRESS;
+
+ return err;
+}
+
+static void pcrypt_aead_dec(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_decrypt(req);
+
+ if (padata->info == -EINPROGRESS)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_decrypt(struct aead_request *req)
+{
+ int err;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct aead_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(req);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_dec;
+ padata->serial = pcrypt_aead_serial;
+
+ aead_request_set_tfm(creq, ctx->child);
+ aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, req);
+ aead_request_set_crypt(creq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
+ if (!err)
+ return -EINPROGRESS;
+
+ return err;
+}
+
+static void pcrypt_aead_givenc(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_givencrypt(req);
+
+ if (padata->info == -EINPROGRESS)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+ int err;
+ struct aead_request *areq = &req->areq;
+ struct pcrypt_request *preq = aead_request_ctx(areq);
+ struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(areq);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_givenc;
+ padata->serial = pcrypt_aead_giv_serial;
+
+ aead_givcrypt_set_tfm(creq, ctx->child);
+ aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, areq);
+ aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
+ aead_givcrypt_set_giv(creq, req->giv, req->seq);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
+ if (!err)
+ return -EINPROGRESS;
+
+ return err;
+}
+
+static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
+{
+ int cpu, cpu_index;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *cipher;
+
+ ictx->tfm_count++;
+
+ cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+
+ ctx->cb_cpu = cpumask_first(cpu_active_mask);
+ for (cpu = 0; cpu < cpu_index; cpu++)
+ ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+
+ cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
+
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
+ + sizeof(struct aead_givcrypt_request)
+ + crypto_aead_reqsize(cipher);
+
+ return 0;
+}
+
+static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
+{
+ struct crypto_instance *inst;
+ struct pcrypt_instance_ctx *ctx;
+ int err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst) {
+ inst = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_inst;
+
+ memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+
+ ctx = crypto_instance_ctx(inst);
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.cra_priority = alg->cra_priority + 100;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+
+out:
+ return inst;
+
+out_free_inst:
+ kfree(inst);
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+ u32 type, u32 mask)
+{
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+
+ alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ inst = pcrypt_alloc_instance(alg);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ inst->alg.cra_type = &crypto_aead_type;
+
+ inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+ inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+ inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+
+ inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
+
+ inst->alg.cra_init = pcrypt_aead_init_tfm;
+ inst->alg.cra_exit = pcrypt_aead_exit_tfm;
+
+ inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
+ inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
+ inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
+ inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
+ inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return inst;
+}
+
+static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return ERR_CAST(algt);
+
+ switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ return pcrypt_alloc_aead(tb, algt->type, algt->mask);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void pcrypt_free(struct crypto_instance *inst)
+{
+ struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_spawn(&ctx->spawn);
+ kfree(inst);
+}
+
+static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct padata_pcrypt *pcrypt;
+ struct pcrypt_cpumask *new_mask, *old_mask;
+ struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
+
+ if (!(val & PADATA_CPU_SERIAL))
+ return 0;
+
+ pcrypt = container_of(self, struct padata_pcrypt, nblock);
+ new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
+ if (!new_mask)
+ return -ENOMEM;
+ if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
+ kfree(new_mask);
+ return -ENOMEM;
+ }
+
+ old_mask = pcrypt->cb_cpumask;
+
+ cpumask_copy(new_mask->mask, cpumask->cbcpu);
+ rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
+ synchronize_rcu_bh();
+
+ free_cpumask_var(old_mask->mask);
+ kfree(old_mask);
+ return 0;
+}
+
+static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+{
+ int ret;
+
+ pinst->kobj.kset = pcrypt_kset;
+ ret = kobject_add(&pinst->kobj, NULL, name);
+ if (!ret)
+ kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
+ return ret;
+}
+
+static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
+ const char *name)
+{
+ int ret = -ENOMEM;
+ struct pcrypt_cpumask *mask;
+
+ get_online_cpus();
+
+ pcrypt->wq = create_workqueue(name);
+ if (!pcrypt->wq)
+ goto err;
+
+ pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
+ if (!pcrypt->pinst)
+ goto err_destroy_workqueue;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask)
+ goto err_free_padata;
+ if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
+ kfree(mask);
+ goto err_free_padata;
+ }
+
+ cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
+ rcu_assign_pointer(pcrypt->cb_cpumask, mask);
+
+ pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
+ ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+ if (ret)
+ goto err_free_cpumask;
+
+ ret = pcrypt_sysfs_add(pcrypt->pinst, name);
+ if (ret)
+ goto err_unregister_notifier;
+
+ put_online_cpus();
+
+ return ret;
+
+err_unregister_notifier:
+ padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+err_free_cpumask:
+ free_cpumask_var(mask->mask);
+ kfree(mask);
+err_free_padata:
+ padata_free(pcrypt->pinst);
+err_destroy_workqueue:
+ destroy_workqueue(pcrypt->wq);
+err:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+{
+ free_cpumask_var(pcrypt->cb_cpumask->mask);
+ kfree(pcrypt->cb_cpumask);
+
+ padata_stop(pcrypt->pinst);
+ padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
+ destroy_workqueue(pcrypt->wq);
+ padata_free(pcrypt->pinst);
+}
+
+static struct crypto_template pcrypt_tmpl = {
+ .name = "pcrypt",
+ .alloc = pcrypt_alloc,
+ .free = pcrypt_free,
+ .module = THIS_MODULE,
+};
+
+static int __init pcrypt_init(void)
+{
+ int err = -ENOMEM;
+
+ pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
+ if (!pcrypt_kset)
+ goto err;
+
+ err = pcrypt_init_padata(&pencrypt, "pencrypt");
+ if (err)
+ goto err_unreg_kset;
+
+ err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
+ if (err)
+ goto err_deinit_pencrypt;
+
+ padata_start(pencrypt.pinst);
+ padata_start(pdecrypt.pinst);
+
+ return crypto_register_template(&pcrypt_tmpl);
+
+err_deinit_pencrypt:
+ pcrypt_fini_padata(&pencrypt);
+err_unreg_kset:
+ kset_unregister(pcrypt_kset);
+err:
+ return err;
+}
+
+static void __exit pcrypt_exit(void)
+{
+ pcrypt_fini_padata(&pencrypt);
+ pcrypt_fini_padata(&pdecrypt);
+
+ kset_unregister(pcrypt_kset);
+ crypto_unregister_template(&pcrypt_tmpl);
+}
+
+module_init(pcrypt_init);
+module_exit(pcrypt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("Parallel crypto wrapper");
diff --git a/crypto/proc.c b/crypto/proc.c
index 5dc07e442fca..58fef67d4f4d 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -25,28 +25,22 @@
#ifdef CONFIG_CRYPTO_FIPS
static struct ctl_table crypto_sysctl_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "fips_enabled",
.data = &fips_enabled,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = 0,
+ .proc_handler = proc_dointvec
},
+ {}
};
static struct ctl_table crypto_dir_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "crypto",
.mode = 0555,
.child = crypto_sysctl_table
},
- {
- .ctl_name = 0,
- },
+ {}
};
static struct ctl_table_header *crypto_sysctls;
@@ -115,13 +109,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- seq_printf(m, "type : digest\n");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n",
- alg->cra_digest.dia_digestsize);
- break;
case CRYPTO_ALG_TYPE_COMPRESS:
seq_printf(m, "type : compression\n");
break;
diff --git a/crypto/rng.c b/crypto/rng.c
index 6e94bc735578..f93cb5311182 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_rng_lock);
@@ -123,4 +124,4 @@ void crypto_put_default_rng(void)
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Random Number Genertor");
+MODULE_DESCRIPTION("Random Number Generator");
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!offset_in_page(walk->offset) || !more)
+ if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 5a013a8bf87a..4c4491229417 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 9efef20454cb..0416091bf45a 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -25,31 +25,21 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha1_ctx {
- u64 count;
- u32 state[5];
- u8 buffer[64];
-};
-
static int sha1_init(struct shash_desc *desc)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
- static const struct sha1_ctx initstate = {
- 0,
- { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- { 0, }
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
- *sctx = initstate;
-
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
- .descsize = sizeof(struct sha1_ctx),
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 6349d8339d37..c48459ebf05b 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -25,12 +25,6 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha256_ctx {
- u32 count[2];
- u32 state[8];
- u8 buf[128];
-};
-
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input)
static int sha224_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
sctx->state[2] = SHA224_H2;
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[5] = SHA224_H5;
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
- sctx->count[0] = 0;
- sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
static int sha256_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
- sctx->count[0] = sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc)
static int sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
- unsigned int i, index, part_len;
-
- /* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x3f);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- sctx->count[1]++;
- sctx->count[1] += (len >> 29);
- }
-
- part_len = 64 - index;
-
- /* Transform as many times as possible. */
- if (len >= part_len) {
- memcpy(&sctx->buf[index], data, part_len);
- sha256_transform(sctx->state, sctx->buf);
-
- for (i = part_len; i + 63 < len; i += 64)
- sha256_transform(sctx->state, &data[i]);
- index = 0;
- } else {
- i = 0;
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
+
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ partial = 0;
}
-
- /* Buffer remaining input */
- memcpy(&sctx->buf[index], &data[i], len-i);
+ memcpy(sctx->buf + partial, src, len - done);
return 0;
}
static int sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
- __be32 bits[2];
+ __be64 bits;
unsigned int index, pad_len;
int i;
static const u8 padding[64] = { 0x80, };
/* Save number of bits */
- bits[1] = cpu_to_be32(sctx->count[0]);
- bits[0] = cpu_to_be32(sctx->count[1]);
+ bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64. */
- index = (sctx->count[0] >> 3) & 0x3f;
+ index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(desc, padding, pad_len);
/* Append length (before padding) */
- sha256_update(desc, (const u8 *)bits, sizeof(bits));
+ sha256_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
return 0;
}
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
static struct shash_alg sha256 = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = sha256_update,
.final = sha256_final,
- .descsize = sizeof(struct sha256_ctx),
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = {
.init = sha224_init,
.update = sha256_update,
.final = sha224_final,
- .descsize = sizeof(struct sha256_ctx),
+ .descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 3bea38d12242..9ed9f60316e5 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -21,12 +21,6 @@
#include <linux/percpu.h>
#include <asm/byteorder.h>
-struct sha512_ctx {
- u64 state[8];
- u32 count[4];
- u8 buf[128];
-};
-
static DEFINE_PER_CPU(u64[80], msg_schedule);
static inline u64 Ch(u64 x, u64 y, u64 z)
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input)
static int
sha512_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2;
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc)
sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc)
static int
sha384_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA384_H0;
sctx->state[1] = SHA384_H1;
sctx->state[2] = SHA384_H2;
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc)
sctx->state[5] = SHA384_H5;
sctx->state[6] = SHA384_H6;
sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc)
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, part_len;
/* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x7F);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- if ((sctx->count[1] += 1) < 1)
- if ((sctx->count[2] += 1) < 1)
- sctx->count[3]++;
- sctx->count[1] += (len >> 29);
- }
+ index = sctx->count[0] & 0x7f;
+
+ /* Update number of bytes */
+ if (!(sctx->count[0] += len))
+ sctx->count[1]++;
part_len = 128 - index;
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int
sha512_final(struct shash_desc *desc, u8 *hash)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash;
- __be32 bits[4];
+ __be64 bits[2];
unsigned int index, pad_len;
int i;
/* Save number of bits */
- bits[3] = cpu_to_be32(sctx->count[0]);
- bits[2] = cpu_to_be32(sctx->count[1]);
- bits[1] = cpu_to_be32(sctx->count[2]);
- bits[0] = cpu_to_be32(sctx->count[3]);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128. */
- index = (sctx->count[0] >> 3) & 0x7f;
+ index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
sha512_update(desc, padding, pad_len);
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash)
dst[i] = cpu_to_be64(sctx->state[i]);
/* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(struct sha512_ctx));
+ memset(sctx, 0, sizeof(struct sha512_state));
return 0;
}
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = {
.init = sha512_init,
.update = sha512_update,
.final = sha512_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = {
.init = sha384_init,
.update = sha512_update,
.final = sha384_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
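It is worth spelling out the bit-count reconstruction above: count[0] now holds the low 64 bits
and count[1] the high 64 bits of the processed byte count, so the 128-bit bit count is the byte
count shifted left by 3. The low word is count[0] << 3, and the three bits shifted out of
count[0] (count[0] >> 61) are carried into the high word together with count[1] << 3, which is
exactly what the two cpu_to_be64() assignments compute.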
diff --git a/crypto/shash.c b/crypto/shash.c
index 2ccc8b0076ce..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -22,6 +22,12 @@
static const struct crypto_type crypto_shash_type;
+static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -31,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
u8 *buffer, *alignbuffer;
int err;
- absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+ absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
err = shash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kzfree(buffer);
return err;
}
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (!shash->setkey)
- return -ENOSYS;
-
if ((unsigned long)key & alignmask)
return shash_setkey_unaligned(tfm, key, keylen);
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
- u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
+ u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
+ int err;
if (unaligned_len > len)
unaligned_len = len;
memcpy(buf, data, unaligned_len);
+ err = shash->update(desc, buf, unaligned_len);
+ memset(buf, 0, unaligned_len);
- return shash->update(desc, buf, unaligned_len) ?:
+ return err ?:
shash->update(desc, data + unaligned_len, len - unaligned_len);
}
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
- u8 buf[shash_align_buffer_size(ds, alignmask)]
+ u8 ubuf[shash_align_buffer_size(ds, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
err = shash->final(desc, buf);
+ if (err)
+ goto out;
+
memcpy(out, buf, ds);
+
+out:
+ memset(buf, 0, ds);
return err;
}
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->finup)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_finup_unaligned(desc, data, len, out);
return shash->finup(desc, data, len, out);
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
- crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->digest)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
return shash->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
-int crypto_shash_import(struct shash_desc *desc, const u8 *in)
+static int shash_default_export(struct shash_desc *desc, void *out)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *alg = crypto_shash_alg(tfm);
-
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
-
- if (alg->reinit)
- alg->reinit(desc);
+ memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
+ return 0;
+}
+static int shash_default_import(struct shash_desc *desc, const void *in)
+{
+ memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_shash_import);
static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req)
return crypto_shash_init(desc);
}
-static int shash_async_update(struct ahash_request *req)
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
- struct shash_desc *desc = ahash_request_ctx(req);
struct crypto_hash_walk walk;
int nbytes;
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req)
return nbytes;
}
+EXPORT_SYMBOL_GPL(shash_ahash_update);
+
+static int shash_async_update(struct ahash_request *req)
+{
+ return shash_ahash_update(req, ahash_request_ctx(req));
+}
static int shash_async_final(struct ahash_request *req)
{
return crypto_shash_final(ahash_request_ctx(req), req->result);
}
-static int shash_async_digest(struct ahash_request *req)
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+ if (!nbytes)
+ return crypto_shash_final(desc, req->result);
+
+ do {
+ nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes,
+ req->result) :
+ crypto_shash_update(desc, walk.data, nbytes);
+ nbytes = crypto_hash_walk_done(&walk, nbytes);
+ } while (nbytes > 0);
+
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_finup);
+
+static int shash_async_finup(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
+
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
+
+ return shash_ahash_finup(req, desc);
+}
+
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
struct scatterlist *sg = req->src;
unsigned int offset = sg->offset;
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req)
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct crypto_shash **ctx =
- crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
void *data;
- desc->tfm = *ctx;
- desc->flags = req->base.flags;
-
data = crypto_kmap(sg_page(sg), 0);
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
crypto_kunmap(data, 0);
crypto_yield(desc->flags);
- goto out;
- }
+ } else
+ err = crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
- err = shash_async_init(req);
- if (err)
- goto out;
+ return err;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_digest);
- err = shash_async_update(req);
- if (err)
- goto out;
+static int shash_async_digest(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
- err = shash_async_final(req);
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
-out:
- return err;
+ return shash_ahash_digest(req, desc);
+}
+
+static int shash_async_export(struct ahash_request *req, void *out)
+{
+ return crypto_shash_export(ahash_request_ctx(req), out);
+}
+
+static int shash_async_import(struct ahash_request *req, const void *in)
+{
+ return crypto_shash_import(ahash_request_ctx(req), in);
}
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
crypto_free_shash(*ctx);
}
-static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->init = shash_async_init;
crt->update = shash_async_update;
- crt->final = shash_async_final;
+ crt->final = shash_async_final;
+ crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
- crt->setkey = shash_async_setkey;
- crt->digestsize = alg->digestsize;
+ if (alg->setkey)
+ crt->setkey = shash_async_setkey;
+ if (alg->export)
+ crt->export = shash_async_export;
+ if (alg->import)
+ crt->import = shash_async_import;
+
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen)
{
- struct shash_desc *desc = crypto_hash_ctx(tfm);
+ struct shash_desc **descp = crypto_hash_ctx(tfm);
+ struct shash_desc *desc = *descp;
return crypto_shash_setkey(desc->tfm, key, keylen);
}
static int shash_compat_init(struct hash_desc *hdesc)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
desc->flags = hdesc->flags;
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc)
static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
unsigned int len)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
struct crypto_hash_walk walk;
int nbytes;
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{
- return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+
+ return crypto_shash_final(*descp, out);
}
static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
void *data;
desc->flags = hdesc->flags;
@@ -372,9 +435,11 @@ out:
static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
{
- struct shash_desc *desc= crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
+ struct shash_desc *desc = *descp;
crypto_free_shash(desc->tfm);
+ kzfree(desc);
}
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct hash_tfm *crt = &tfm->crt_hash;
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct shash_desc *desc = crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
+ struct shash_desc *desc;
if (!crypto_mod_get(calg))
return -EAGAIN;
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
return PTR_ERR(shash);
}
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ crypto_free_shash(shash);
+ return -ENOMEM;
+ }
+
+ *descp = desc;
desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat;
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
return crypto_init_shash_ops_compat(tfm);
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return crypto_init_shash_ops_async(tfm);
}
return -EINVAL;
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
- struct shash_alg *salg = __crypto_shash_alg(alg);
-
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
- return sizeof(struct shash_desc) + salg->descsize;
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return sizeof(struct crypto_shash *);
+ return sizeof(struct shash_desc *);
}
return 0;
}
-static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
+
+ hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
-static unsigned int crypto_shash_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : shash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", salg->digestsize);
- seq_printf(m, "descsize : %u\n", salg->descsize);
}
static const struct crypto_type crypto_shash_type = {
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
-int crypto_register_shash(struct shash_alg *alg)
+static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (alg->digestsize > PAGE_SIZE / 8 ||
- alg->descsize > PAGE_SIZE / 8)
+ alg->descsize > PAGE_SIZE / 8 ||
+ alg->statesize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ if (!alg->finup)
+ alg->finup = shash_finup_unaligned;
+ if (!alg->digest)
+ alg->digest = shash_digest_unaligned;
+ if (!alg->export) {
+ alg->export = shash_default_export;
+ alg->import = shash_default_import;
+ alg->statesize = alg->descsize;
+ }
+ if (!alg->setkey)
+ alg->setkey = shash_no_setkey;
+
+ return 0;
+}
+
+int crypto_register_shash(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = shash_prepare_alg(alg);
+ if (err)
+ return err;
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst)
+{
+ int err;
+
+ err = shash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, shash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_register_instance);
+
+void shash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(shash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_free_instance);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+ struct shash_alg *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_shash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) :
+ container_of(alg, struct shash_alg, base);
+}
+EXPORT_SYMBOL_GPL(shash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d59ba5079d14..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -18,8 +18,8 @@
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/gfp.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
@@ -45,6 +45,9 @@
*/
static unsigned int sec;
+static char *alg = NULL;
+static u32 type;
+static u32 mask;
static int mode;
static char *tvmem[TVMEMSIZE];
@@ -391,6 +394,17 @@ out:
return 0;
}
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+ int i;
+
+ sg_init_table(sg, TVMEMSIZE);
+ for (i = 0; i < TVMEMSIZE; i++) {
+ sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+ memset(tvmem[i], 0xff, PAGE_SIZE);
+ }
+}
+
static void test_hash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
@@ -420,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
- sg_init_table(sg, TVMEMSIZE);
- for (i = 0; i < TVMEMSIZE; i++) {
- sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
- memset(tvmem[i], 0xff, PAGE_SIZE);
- }
-
+ test_hash_sg_init(sg);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
printk(KERN_ERR
@@ -434,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -455,6 +467,250 @@ out:
crypto_free_hash(tfm);
}
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+ struct tcrypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ struct tcrypt_result *tr = req->base.data;
+
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+ char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ return ret;
+ }
+
+ printk("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+ int plen, char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount, pcount;
+ int ret;
+
+ if (plen == blen)
+ return test_ahash_jiffies_digest(req, blen, out, sec);
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ return ret;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ return ret;
+ }
+ /* we assume there is enough space in 'out' for the result */
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+ char *out)
+{
+ unsigned long cycles = 0;
+ int ret, i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+ int plen, char *out)
+{
+ unsigned long cycles = 0;
+ int i, pcount, ret;
+
+ if (plen == blen)
+ return test_ahash_cycles_digest(req, blen, out);
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+ struct hash_speed *speed)
+{
+ struct scatterlist sg[TVMEMSIZE];
+ struct tcrypt_result tresult;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ static char output[1024];
+ int i, ret;
+
+ printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ return;
+ }
+
+ if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+ pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+ crypto_ahash_digestsize(tfm), sizeof(output));
+ goto out;
+ }
+
+ test_hash_sg_init(sg);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("ahash request allocation failure\n");
+ goto out;
+ }
+
+ init_completion(&tresult.completion);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &tresult);
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ break;
+ }
+
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+ ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+ if (sec)
+ ret = test_ahash_jiffies(req, speed[i].blen,
+ speed[i].plen, output, sec);
+ else
+ ret = test_ahash_cycles(req, speed[i].blen,
+ speed[i].plen, output);
+
+ if (ret) {
+ pr_err("hashing failed ret=%d\n", ret);
+ break;
+ }
+ }
+
+ ahash_request_free(req);
+
+out:
+ crypto_free_ahash(tfm);
+}
+
static void test_available(void)
{
char **name = check;
@@ -716,6 +972,10 @@ static int do_test(int m)
ret += tcrypt_test("hmac(rmd160)");
break;
+ case 109:
+ ret += tcrypt_test("vmac(aes)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
@@ -874,9 +1134,87 @@ static int do_test(int m)
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
+ case 318:
+ test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
+ case 400:
+ /* fall through */
+
+ case 401:
+ test_ahash_speed("md4", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 402:
+ test_ahash_speed("md5", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 403:
+ test_ahash_speed("sha1", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 404:
+ test_ahash_speed("sha256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 405:
+ test_ahash_speed("sha384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 406:
+ test_ahash_speed("sha512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 407:
+ test_ahash_speed("wp256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 408:
+ test_ahash_speed("wp384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 409:
+ test_ahash_speed("wp512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 410:
+ test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 411:
+ test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 412:
+ test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 413:
+ test_ahash_speed("sha224", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 414:
+ test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 415:
+ test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 416:
+ test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 417:
+ test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 499:
+ break;
+
case 1000:
test_available();
break;
@@ -885,6 +1223,12 @@ static int do_test(int m)
return ret;
}
+static int do_alg_test(const char *alg, u32 type, u32 mask)
+{
+ return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
+ 0 : -ENOENT;
+}
+
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
@@ -896,7 +1240,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
- err = do_test(mode);
+ if (alg)
+ err = do_alg_test(alg, type, mask);
+ else
+ err = do_test(mode);
+
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv;
@@ -928,6 +1276,9 @@ static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
+module_param(alg, charp, 0);
+module_param(type, uint, 0);
+module_param(mask, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
+ unsigned int klen; /* key length */
};
/*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
+static struct hash_speed hash_speed_template_16[] = {
+ { .blen = 16, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 16, .klen = 16, },
+ { .blen = 256, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 16, .klen = 16, },
+ { .blen = 1024, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 16, .klen = 16, },
+ { .blen = 2048, .plen = 256, .klen = 16, },
+ { .blen = 2048, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 2048, .klen = 16, },
+ { .blen = 4096, .plen = 16, .klen = 16, },
+ { .blen = 4096, .plen = 256, .klen = 16, },
+ { .blen = 4096, .plen = 1024, .klen = 16, },
+ { .blen = 4096, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 16, .klen = 16, },
+ { .blen = 8192, .plen = 256, .klen = 16, },
+ { .blen = 8192, .plen = 1024, .klen = 16, },
+ { .blen = 8192, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 8192, .klen = 16, },
+
+ /* End marker */
+ { .blen = 0, .plen = 0, .klen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
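With the new alg/type/mask parameters, tcrypt can be pointed at a single algorithm instead of a
numbered test mode: do_alg_test() simply calls crypto_has_alg(), and instantiating the algorithm
is what makes the crypto manager run its self-tests. As an illustration (the algorithm string is
only an example):

    modprobe tcrypt alg="pcrypt(authenc(hmac(sha1),cbc(aes)))" type=3

The keyed hash_speed_template_16 entries above pair with the ghash speed test (mode=318), letting
test_hash_speed() set a 16-byte key before timing each block size.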
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e9e9d84293b9..fa8c8f78c8d4 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -22,6 +22,17 @@
#include <crypto/rng.h>
#include "internal.h"
+
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/* a perfect nop */
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
+{
+ return 0;
+}
+
+#else
+
#include "testmgr.h"
/*
@@ -153,8 +164,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
+static int do_one_async_hash_op(struct ahash_request *req,
+ struct tcrypt_result *tr,
+ int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
- unsigned int tcount)
+ unsigned int tcount, bool use_digest)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
unsigned int i, j, k, temp;
@@ -190,10 +214,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
hash_buff = xbuf[0];
- ret = -EINVAL;
- if (WARN_ON(template[i].psize > PAGE_SIZE))
- goto out;
-
memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize);
@@ -210,23 +230,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- INIT_COMPLETION(tresult.completion);
- break;
+ if (use_digest) {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_digest(req));
+ if (ret) {
+ pr_err("alg: hash: digest failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ } else {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_init(req));
+ if (ret) {
+ pr_err("alt: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_update(req));
+ if (ret) {
+ pr_err("alt: hash: update failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_final(req));
+ if (ret) {
+ pr_err("alt: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
}
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
}
if (memcmp(result, template[i].digest,
@@ -1205,7 +1238,7 @@ static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
- int err, i, j, seedsize;
+ int err = 0, i, j, seedsize;
u8 *seed;
char result[32];
@@ -1406,7 +1439,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, true);
+ if (!err)
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, false);
crypto_free_ahash(tfm);
return err;
@@ -1481,9 +1518,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
return err;
}
+static int alg_test_null(const struct alg_test_desc *desc,
+ const char *driver, u32 type, u32 mask)
+{
+ return 0;
+}
+
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
+ .alg = "__driver-cbc-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__driver-ecb-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__ghash-pclmulqdqni",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.fips_allowed = 1,
@@ -1627,6 +1709,30 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "cryptd(__driver-ecb-aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "cryptd(__ghash-pclmulqdqni)",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -1673,6 +1779,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecb(__aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -1947,6 +2068,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ghash",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = ghash_tv_template,
+ .count = GHASH_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "hmac(md5)",
.test = alg_test_hash,
.suite = {
@@ -2252,6 +2382,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "vmac(aes)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = aes_vmac128_tv_template,
+ .count = VMAC_AES_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
@@ -2348,6 +2487,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
+ int j;
int rc;
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@@ -2369,14 +2509,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
}
i = alg_find_test(alg);
- if (i < 0)
+ j = alg_find_test(driver);
+ if (i < 0 && j < 0)
goto notest;
- if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
+ (j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg;
- rc = alg_test_descs[i].test(alg_test_descs + i, driver,
- type, mask);
+ rc = 0;
+ if (i >= 0)
+ rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
+ type, mask);
+ if (j >= 0)
+ rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
+ type, mask);
+
test_done:
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
@@ -2393,4 +2541,7 @@ notest:
non_fips_alg:
return -EINVAL;
}
+
+#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
+
EXPORT_SYMBOL_GPL(alg_test);
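
The hunk above makes alg_test() consult the sorted alg_test_descs[] table twice, once for the generic algorithm name and once for the driver name, and OR the two results, so the alg_test_null placeholder entries for internal driver names are honoured alongside the generic entries. A small userspace sketch of that lookup-and-merge idea; the two-entry table, the names "ghash"/"ghash-generic" and the bsearch() helper are illustrative stand-ins, not the kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct test_desc {
	const char *alg;
	int (*test)(const struct test_desc *desc);
};

static int test_ok(const struct test_desc *desc)
{
	printf("running self-test for %s\n", desc->alg);
	return 0;
}

/* must stay sorted by name, like the kernel table */
static const struct test_desc descs[] = {
	{ "ghash",         test_ok },
	{ "ghash-generic", test_ok },
};

static int cmp(const void *key, const void *elem)
{
	return strcmp(key, ((const struct test_desc *)elem)->alg);
}

static int find_test(const char *name)
{
	const struct test_desc *d;

	d = bsearch(name, descs, sizeof(descs) / sizeof(descs[0]),
		    sizeof(descs[0]), cmp);
	return d ? (int)(d - descs) : -1;
}

int main(void)
{
	const char *alg = "ghash", *driver = "ghash-generic";
	int i = find_test(alg);
	int j = find_test(driver);
	int rc = 0;

	if (i < 0 && j < 0) {
		printf("no self-test registered\n");
		return 0;
	}
	if (i >= 0)
		rc |= descs[i].test(&descs[i]);	/* generic entry */
	if (j >= 0)
		rc |= descs[j].test(&descs[j]);	/* driver entry */

	printf("combined result: %d\n", rc);
	return rc;
}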
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 69316228fc19..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1003,6 +1003,21 @@ static struct hash_testvec tgr128_tv_template[] = {
},
};
+#define GHASH_TEST_VECTORS 1
+
+static struct hash_testvec ghash_tv_template[] =
+{
+ {
+
+ .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+ .ksize = 16,
+ .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+ .psize = 16,
+ .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+ "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+ },
+};
+
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).
@@ -1654,6 +1669,78 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
+#define VMAC_AES_TEST_VECTORS 8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
+ '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07',
+ '\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ };
+
+static struct hash_testvec aes_vmac128_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = NULL,
+ .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string1,
+ .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string2,
+ .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string3,
+ .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = NULL,
+ .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string1,
+ .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string2,
+ .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string3,
+ .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
+ .psize = 128,
+ .ksize = 16,
+ },
+};
+
/*
* SHA384 HMAC test vectors from RFC4231
*/
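
One note on the VMAC vectors added earlier in this testmgr.h hunk: the vmac_string buffers are declared as 128-byte arrays but initialised with only a few characters, and C zero-fills the remainder, which is why even the "abc" vector is hashed with .psize = 128. A tiny standalone check of that padding behaviour (userspace, purely illustrative):

#include <stdio.h>

static char vmac_string2[128] = {'a', 'b', 'c',};

int main(void)
{
	size_t i, nonzero = 0;

	for (i = 0; i < sizeof(vmac_string2); i++)
		if (vmac_string2[i])
			nonzero++;

	/* prints: buffer length 128, non-zero bytes 3 */
	printf("buffer length %zu, non-zero bytes %zu\n",
	       sizeof(vmac_string2), nonzero);
	return 0;
}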
diff --git a/crypto/twofish.c b/crypto/twofish_generic.c
index dfcda231f87a..1f07b843e07c 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
+MODULE_ALIAS("twofish");
diff --git a/crypto/vmac.c b/crypto/vmac.c
new file mode 100644
index 000000000000..0999274a27ac
--- /dev/null
+++ b/crypto/vmac.c
@@ -0,0 +1,675 @@
+/*
+ * Modified to interface to the Linux kernel
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+/* --------------------------------------------------------------------------
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Please send bug reports to the authors.
+ * Last modified: 17 APR 08, 1700 PDT
+ * ----------------------------------------------------------------------- */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <asm/byteorder.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/vmac.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * Constants and masks
+ */
+#define UINT64_C(x) x##ULL
+const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
+const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
+const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
+const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
+const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+
+#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
+
+#ifdef __LITTLE_ENDIAN
+#define INDEX_HIGH 1
+#define INDEX_LOW 0
+#else
+#define INDEX_HIGH 0
+#define INDEX_LOW 1
+#endif
+
+/*
+ * The following routines are used in this implementation. They are
+ * written via macros to simulate zero-overhead call-by-reference.
+ *
+ * MUL64: 64x64->128-bit multiplication
+ * PMUL64: assumes top bits cleared on inputs
+ * ADD128: 128x128->128-bit addition
+ */
+
+#define ADD128(rh, rl, ih, il) \
+ do { \
+ u64 _il = (il); \
+ (rl) += (_il); \
+ if ((rl) < (_il)) \
+ (rh)++; \
+ (rh) += (ih); \
+ } while (0)
+
+#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
+
+#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m >> 32), (m << 32)); \
+ } while (0)
+
+#define MUL64(rh, rl, i1, i2) \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m1 = MUL32(_i1, _i2>>32); \
+ u64 m2 = MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
+ ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
+ } while (0)
+
+/*
+ * For highest performance the L1 NH and L2 polynomial hashes should be
+ * carefully implemented to take advantage of one's target architecture.
+ * Here these two hash functions are defined multiple times; once for
+ * 64-bit architectures, once for 32-bit SSE2 architectures, and once
+ * for the rest (32-bit) architectures.
+ * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
+ * Optionally, nh_vmac_nhbytes can be defined (for multiples of
+ * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
+ * NH computations at once).
+ */
+
+#ifdef CONFIG_64BIT
+
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+
+#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+#endif
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ do { \
+ u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
+ /* compute ab*cd, put bd into result registers */ \
+ PMUL64(t3h, t3l, al, kh); \
+ PMUL64(t2h, t2l, ah, kl); \
+ PMUL64(t1h, t1l, ah, 2*kh); \
+ PMUL64(ah, al, al, kl); \
+ /* add 2 * ac to result */ \
+ ADD128(ah, al, t1h, t1l); \
+ /* add together ad + bc */ \
+ ADD128(t2h, t2l, t3h, t3l); \
+ /* now (ah,al), (t2l,2*t2h) need summing */ \
+ /* first add the high registers, carrying into t2h */ \
+ ADD128(t2h, ah, z, t2l); \
+ /* double t2h and add top bit of ah */ \
+ t2h = 2 * t2h + (ah >> 63); \
+ ah &= m63; \
+ /* now add the low registers */ \
+ ADD128(ah, al, mh, ml); \
+ ADD128(ah, al, z, t2h); \
+ } while (0)
+
+#else /* ! CONFIG_64BIT */
+
+#ifndef nh_16
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ u64 t1, t2, m1, m2, t; \
+ int i; \
+ rh = rl = t = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ t1 = pe64_to_cpup(mp+i) + kp[i]; \
+ t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
+ m2 = MUL32(t1 >> 32, t2); \
+ m1 = MUL32(t1, t2 >> 32); \
+ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
+ MUL32(t1, t2)); \
+ rh += (u64)(u32)(m1 >> 32) \
+ + (u32)(m2 >> 32); \
+ t += (u64)(u32)m1 + (u32)m2; \
+ } \
+ ADD128(rh, rl, (t >> 32), (t << 32)); \
+ } while (0)
+#endif
+
+static void poly_step_func(u64 *ahi, u64 *alo,
+ const u64 *kh, const u64 *kl,
+ const u64 *mh, const u64 *ml)
+{
+#define a0 (*(((u32 *)alo)+INDEX_LOW))
+#define a1 (*(((u32 *)alo)+INDEX_HIGH))
+#define a2 (*(((u32 *)ahi)+INDEX_LOW))
+#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
+#define k0 (*(((u32 *)kl)+INDEX_LOW))
+#define k1 (*(((u32 *)kl)+INDEX_HIGH))
+#define k2 (*(((u32 *)kh)+INDEX_LOW))
+#define k3 (*(((u32 *)kh)+INDEX_HIGH))
+
+ u64 p, q, t;
+ u32 t2;
+
+ p = MUL32(a3, k3);
+ p += p;
+ p += *(u64 *)mh;
+ p += MUL32(a0, k2);
+ p += MUL32(a1, k1);
+ p += MUL32(a2, k0);
+ t = (u32)(p);
+ p >>= 32;
+ p += MUL32(a0, k3);
+ p += MUL32(a1, k2);
+ p += MUL32(a2, k1);
+ p += MUL32(a3, k0);
+ t |= ((u64)((u32)p & 0x7fffffff)) << 32;
+ p >>= 31;
+ p += (u64)(((u32 *)ml)[INDEX_LOW]);
+ p += MUL32(a0, k0);
+ q = MUL32(a1, k3);
+ q += MUL32(a2, k2);
+ q += MUL32(a3, k1);
+ q += q;
+ p += q;
+ t2 = (u32)(p);
+ p >>= 32;
+ p += (u64)(((u32 *)ml)[INDEX_HIGH]);
+ p += MUL32(a0, k1);
+ p += MUL32(a1, k0);
+ q = MUL32(a2, k3);
+ q += MUL32(a3, k2);
+ q += q;
+ p += q;
+ *(u64 *)(alo) = (p << 32) | t2;
+ p >>= 32;
+ *(u64 *)(ahi) = p + t;
+
+#undef a0
+#undef a1
+#undef a2
+#undef a3
+#undef k0
+#undef k1
+#undef k2
+#undef k3
+}
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
+
+#endif /* end of specialized NH and poly definitions */
+
+/* At least nh_16 is defined. Define the others as needed here. */
+#ifndef nh_16_2
+#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_16(mp, kp, nw, rh, rl); \
+ nh_16(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+#ifndef nh_vmac_nhbytes
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ nh_16(mp, kp, nw, rh, rl)
+#endif
+#ifndef nh_vmac_nhbytes_2
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
+ nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+
+static void vhash_abort(struct vmac_ctx *ctx)
+{
+ ctx->polytmp[0] = ctx->polykey[0] ;
+ ctx->polytmp[1] = ctx->polykey[1] ;
+ ctx->first_block_processed = 0;
+}
+
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
+{
+ u64 rh, rl, t, z = 0;
+
+ /* fully reduce (p1,p2)+(len,0) mod p127 */
+ t = p1 >> 63;
+ p1 &= m63;
+ ADD128(p1, p2, len, t);
+ /* At this point, (p1,p2) is at most 2^127+(len<<64) */
+ t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
+ ADD128(p1, p2, z, t);
+ p1 &= m63;
+
+ /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
+ t = p1 + (p2 >> 32);
+ t += (t >> 32);
+ t += (u32)t > 0xfffffffeu;
+ p1 += (t >> 32);
+ p2 += (p1 << 32);
+
+ /* compute (p1+k1)%p64 and (p2+k2)%p64 */
+ p1 += k1;
+ p1 += (0 - (p1 < k1)) & 257;
+ p2 += k2;
+ p2 += (0 - (p2 < k2)) & 257;
+
+ /* compute (p1+k1)*(p2+k2)%p64 */
+ MUL64(rh, rl, p1, p2);
+ t = rh >> 56;
+ ADD128(t, rl, z, rh);
+ rh <<= 8;
+ ADD128(t, rl, z, rh);
+ t += t << 8;
+ rl += t;
+ rl += (0 - (rl < t)) & 257;
+ rl += (0 - (rl > p64-1)) & 257;
+ return rl;
+}
+
+static void vhash_update(const unsigned char *m,
+ unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
+ struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
+
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+
+ if (!ctx->first_block_processed) {
+ ctx->first_block_processed = 1;
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ ADD128(ch, cl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+
+ ctx->polytmp[0] = ch;
+ ctx->polytmp[1] = cl;
+}
+
+static u64 vhash(unsigned char m[], unsigned int mbytes,
+ u64 *tagl, struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i, remaining;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES;
+ remaining = mbytes % VMAC_NHBYTES;
+
+ if (ctx->first_block_processed) {
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+ } else if (i) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ } else if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ goto do_l3;
+ } else {/* Empty String */
+ ch = pkh; cl = pkl;
+ goto do_l3;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+ if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ }
+
+do_l3:
+ vhash_abort(ctx);
+ remaining *= 8;
+ return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+}
+
+static u64 vmac(unsigned char m[], unsigned int mbytes,
+ unsigned char n[16], u64 *tagl,
+ struct vmac_ctx_t *ctx)
+{
+ u64 *in_n, *out_p;
+ u64 p, h;
+ int i;
+
+ in_n = ctx->__vmac_ctx.cached_nonce;
+ out_p = ctx->__vmac_ctx.cached_aes;
+
+ i = n[15] & 1;
+ if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+ in_n[0] = *(u64 *)(n);
+ in_n[1] = *(u64 *)(n+8);
+ ((unsigned char *)in_n)[15] &= 0xFE;
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out_p, (unsigned char *)in_n);
+
+ ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ }
+ p = be64_to_cpup(out_p + i);
+ h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+ return le64_to_cpu(p + h);
+}
+
+static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+{
+ u64 in[2] = {0}, out[2];
+ unsigned i;
+ int err = 0;
+
+ err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ if (err)
+ return err;
+
+ /* Fill nh key */
+ ((unsigned char *)in)[0] = 0x80;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill poly key */
+ ((unsigned char *)in)[0] = 0xC0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.polytmp[i] =
+ ctx->__vmac_ctx.polykey[i] =
+ be64_to_cpup(out) & mpoly;
+ ctx->__vmac_ctx.polytmp[i+1] =
+ ctx->__vmac_ctx.polykey[i+1] =
+ be64_to_cpup(out+1) & mpoly;
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill ip key */
+ ((unsigned char *)in)[0] = 0xE0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ do {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ } while (ctx->__vmac_ctx.l3key[i] >= p64
+ || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ }
+
+ /* Invalidate nonce/aes cache and reset other elements */
+ ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.first_block_processed = 0;
+
+ return err;
+}
+
+static int vmac_setkey(struct crypto_shash *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return vmac_set_key((u8 *)key, ctx);
+}
+
+static int vmac_init(struct shash_desc *pdesc)
+{
+ return 0;
+}
+
+static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ vhash_update(p, len, &ctx->__vmac_ctx);
+
+ return 0;
+}
+
+static int vmac_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ vmac_t mac;
+ u8 nonce[16] = {};
+
+ mac = vmac(NULL, 0, nonce, NULL, ctx);
+ memcpy(out, &mac, sizeof(vmac_t));
+ memset(&mac, 0, sizeof(vmac_t));
+ memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ return 0;
+}
+
+static int vmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_cipher *cipher;
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void vmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_cipher(ctx->child);
+}
+
+static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct shash_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ if (err)
+ return err;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
+ inst = shash_alloc_instance("vmac", alg);
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ inst->alg.digestsize = sizeof(vmac_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_init = vmac_init_tfm;
+ inst->alg.base.cra_exit = vmac_exit_tfm;
+
+ inst->alg.init = vmac_init;
+ inst->alg.update = vmac_update;
+ inst->alg.final = vmac_final;
+ inst->alg.setkey = vmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static struct crypto_template vmac_tmpl = {
+ .name = "vmac",
+ .create = vmac_create,
+ .free = shash_free_instance,
+ .module = THIS_MODULE,
+};
+
+static int __init vmac_module_init(void)
+{
+ return crypto_register_template(&vmac_tmpl);
+}
+
+static void __exit vmac_module_exit(void)
+{
+ crypto_unregister_template(&vmac_tmpl);
+}
+
+module_init(vmac_module_init);
+module_exit(vmac_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VMAC hash algorithm");
+
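
The MUL64/PMUL64/ADD128 macros near the top of vmac.c above are the building blocks for the NH and polynomial hashes: a 64x64 -> 128-bit product is assembled from four 32x32 -> 64-bit partial products, with ADD128 propagating the carries between the two result halves. A standalone userspace sketch of the same construction; the check value is (2^64 - 1)^2, whose 128-bit result is 0xfffffffffffffffe_0000000000000001:

#include <stdint.h>
#include <stdio.h>

#define ADD128(rh, rl, ih, il)			\
	do {					\
		uint64_t _il = (il);		\
		(rl) += (_il);			\
		if ((rl) < (_il))		\
			(rh)++;			\
		(rh) += (ih);			\
	} while (0)

#define MUL32(i1, i2) ((uint64_t)(uint32_t)(i1) * (uint32_t)(i2))

#define MUL64(rh, rl, i1, i2)					\
	do {							\
		uint64_t _i1 = (i1), _i2 = (i2);		\
		uint64_t m1 = MUL32(_i1, _i2 >> 32);		\
		uint64_t m2 = MUL32(_i1 >> 32, _i2);		\
		rh = MUL32(_i1 >> 32, _i2 >> 32);		\
		rl = MUL32(_i1, _i2);				\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));		\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));		\
	} while (0)

int main(void)
{
	uint64_t rh, rl;

	MUL64(rh, rl, UINT64_MAX, UINT64_MAX);
	/* expected: high = fffffffffffffffe, low = 0000000000000001 */
	printf("high = %016llx, low = %016llx\n",
	       (unsigned long long)rh, (unsigned long long)rl);
	return !(rh == 0xfffffffffffffffeULL && rl == 1);
}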
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index b63b633e549c..bb7b67fba349 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -19,211 +19,142 @@
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
-#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
#include <linux/err.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/rtnetlink.h>
-#include <linux/slab.h>
-#include <linux/scatterlist.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
0x03030303, 0x03030303, 0x03030303, 0x03030303};
+
/*
* +------------------------
* | <parent tfm>
* +------------------------
- * | crypto_xcbc_ctx
+ * | xcbc_tfm_ctx
* +------------------------
- * | odds (block size)
+ * | consts (block size * 2)
* +------------------------
- * | prev (block size)
+ */
+struct xcbc_tfm_ctx {
+ struct crypto_cipher *child;
+ u8 ctx[];
+};
+
+/*
* +------------------------
- * | key (block size)
+ * | <shash desc>
* +------------------------
- * | consts (block size * 3)
+ * | xcbc_desc_ctx
+ * +------------------------
+ * | odds (block size)
+ * +------------------------
+ * | prev (block size)
* +------------------------
*/
-struct crypto_xcbc_ctx {
- struct crypto_cipher *child;
- u8 *odds;
- u8 *prev;
- u8 *key;
- u8 *consts;
- void (*xor)(u8 *a, const u8 *b, unsigned int bs);
- unsigned int keylen;
+struct xcbc_desc_ctx {
unsigned int len;
+ u8 ctx[];
};
-static void xor_128(u8 *a, const u8 *b, unsigned int bs)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- struct crypto_xcbc_ctx *ctx)
+static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
+ const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
u8 key1[bs];
- if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
- return err;
+ if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
+ return err;
- crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
+ crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
+ crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
+ crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
return crypto_cipher_setkey(ctx->child, key1, bs);
-}
-
-static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- const u8 *inkey, unsigned int keylen)
-{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
-
- if (keylen != crypto_cipher_blocksize(ctx->child))
- return -EINVAL;
- ctx->keylen = keylen;
- memcpy(ctx->key, inkey, keylen);
- ctx->consts = (u8*)ks;
-
- return _crypto_xcbc_digest_setkey(parent, ctx);
}
-static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
+static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
- int bs = crypto_hash_blocksize(pdesc->tfm);
+ unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ int bs = crypto_shash_blocksize(pdesc->tfm);
+ u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
ctx->len = 0;
- memset(ctx->odds, 0, bs);
- memset(ctx->prev, 0, bs);
+ memset(prev, 0, bs);
return 0;
}
-static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
+static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int slen = sg->length;
-
- if (unlikely(slen > nbytes))
- slen = nbytes;
-
- nbytes -= slen;
-
- while (slen > 0) {
- unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- char *p = crypto_kmap(pg, 0) + offset;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(ctx->odds + ctx->len, p, len);
- ctx->len += len;
- slen -= len;
-
- /* checking the rest of the page */
- if (len + offset >= PAGE_SIZE) {
- offset = 0;
- pg++;
- } else
- offset += len;
-
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- continue;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
-
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
- ctx->xor(ctx->prev, p, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev,
- ctx->prev);
- p += bs;
- len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(ctx->odds, p, len);
- ctx->len = len;
- }
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- offset = 0;
- pg++;
- }
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+
+ /* checking the data can fill the block */
+ if ((ctx->len + len) <= bs) {
+ memcpy(odds + ctx->len, p, len);
+ ctx->len += len;
+ return 0;
}
- return 0;
-}
+ /* filling odds with new data and encrypting it */
+ memcpy(odds + ctx->len, p, bs - ctx->len);
+ len -= bs - ctx->len;
+ p += bs - ctx->len;
-static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
-}
+ crypto_xor(prev, odds, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
-static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
-{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
- int err = 0;
-
- if (ctx->len == bs) {
- u8 key2[bs];
+ /* clearing the length */
+ ctx->len = 0;
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
+ /* encrypting the rest of data */
+ while (len > bs) {
+ crypto_xor(prev, p, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
+ p += bs;
+ len -= bs;
+ }
- crypto_cipher_encrypt_one(tfm, key2,
- (u8 *)(ctx->consts + bs));
+ /* keeping the surplus of blocksize */
+ if (len) {
+ memcpy(odds, p, len);
+ ctx->len = len;
+ }
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key2, bs);
- _crypto_xcbc_digest_setkey(parent, ctx);
+ return 0;
+}
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
- } else {
- u8 key3[bs];
+static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+ unsigned int offset = 0;
+
+ if (ctx->len != bs) {
unsigned int rlen;
- u8 *p = ctx->odds + ctx->len;
+ u8 *p = odds + ctx->len;
+
*p = 0x80;
p++;
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if (rlen)
memset(p, 0, rlen);
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
-
- crypto_cipher_encrypt_one(tfm, key3,
- (u8 *)(ctx->consts + bs * 2));
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key3, bs);
-
- _crypto_xcbc_digest_setkey(parent, ctx);
-
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
+ offset += bs;
}
- return 0;
-}
+ crypto_xor(prev, odds, bs);
+ crypto_xor(prev, consts + offset, bs);
-static int crypto_xcbc_digest(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
+ crypto_cipher_encrypt_one(tfm, out, prev);
- crypto_xcbc_digest_init(pdesc);
- crypto_xcbc_digest_update2(pdesc, sg, nbytes);
- return crypto_xcbc_digest_final(pdesc, out);
+ return 0;
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
- int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- switch(bs) {
- case 16:
- ctx->xor = xor_128;
- break;
- default:
- return -EINVAL;
- }
-
ctx->child = cipher;
- ctx->odds = (u8*)(ctx+1);
- ctx->prev = ctx->odds + bs;
- ctx->key = ctx->prev + bs;
return 0;
};
static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
+static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ unsigned long alignmask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
+ return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
switch(alg->cra_blocksize) {
case 16:
break;
default:
- inst = ERR_PTR(-EINVAL);
goto out_put_alg;
}
- inst = crypto_alloc_instance("xcbc", alg);
+ inst = shash_alloc_instance("xcbc", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = alg->cra_blocksize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
- ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
- inst->alg.cra_init = xcbc_init_tfm;
- inst->alg.cra_exit = xcbc_exit_tfm;
-
- inst->alg.cra_hash.init = crypto_xcbc_digest_init;
- inst->alg.cra_hash.update = crypto_xcbc_digest_update;
- inst->alg.cra_hash.final = crypto_xcbc_digest_final;
- inst->alg.cra_hash.digest = crypto_xcbc_digest;
- inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ alignmask = alg->cra_alignmask | 3;
+ inst->alg.base.cra_alignmask = alignmask;
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+
+ inst->alg.digestsize = alg->cra_blocksize;
+ inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
+ crypto_tfm_ctx_alignment()) +
+ (alignmask &
+ ~(crypto_tfm_ctx_alignment() - 1)) +
+ alg->cra_blocksize * 2;
+
+ inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
+ alignmask + 1) +
+ alg->cra_blocksize * 2;
+ inst->alg.base.cra_init = xcbc_init_tfm;
+ inst->alg.base.cra_exit = xcbc_exit_tfm;
+
+ inst->alg.init = crypto_xcbc_digest_init;
+ inst->alg.update = crypto_xcbc_digest_update;
+ inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.setkey = crypto_xcbc_digest_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
-}
-
-static void xcbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ return err;
}
static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc",
- .alloc = xcbc_alloc,
- .free = xcbc_free,
+ .create = xcbc_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};
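
The rewritten xcbc above keeps its per-transform constants and its per-request odds/prev buffers in flexible trailing arrays (the ctx[] members of xcbc_tfm_ctx and xcbc_desc_ctx), locating them at run time with PTR_ALIGN against the instance's alignmask; cra_ctxsize and descsize are padded accordingly in xcbc_create(). A minimal userspace sketch of that layout, assuming an illustrative 16-byte block size and an alignmask of 3 (the "alg->cra_alignmask | 3" of xcbc_create() for a typical cipher):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCKSIZE 16
#define ALIGNMASK 3	/* buffers aligned to 4 bytes */

/* PTR_ALIGN-style round-up for a power-of-two alignment */
static void *ptr_align(void *p, unsigned long a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

struct desc_ctx {
	unsigned int len;
	uint8_t ctx[];		/* odds + prev live here, aligned */
};

int main(void)
{
	/* worst case: alignment padding plus two block-size buffers */
	size_t descsize = sizeof(struct desc_ctx) + ALIGNMASK +
			  2 * BLOCKSIZE;
	struct desc_ctx *desc = calloc(1, descsize);
	uint8_t *odds, *prev;

	if (!desc)
		return 1;

	odds = ptr_align(desc->ctx, ALIGNMASK + 1);
	prev = odds + BLOCKSIZE;

	memset(prev, 0, BLOCKSIZE);	/* mirrors crypto_xcbc_digest_init() */
	printf("desc=%p odds=%p prev=%p\n",
	       (void *)desc, (void *)odds, (void *)prev);

	free(desc);
	return 0;
}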
diff --git a/crypto/xor.c b/crypto/xor.c
index fc5b836f3430..b75182d8ab14 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -18,6 +18,7 @@
#define BH_TRACE 0
#include <linux/module.h>
+#include <linux/gfp.h>
#include <linux/raid/xor.h>
#include <linux/jiffies.h>
#include <asm/xor.h>
diff --git a/crypto/xts.c b/crypto/xts.c
index d87b0f3102c3..555ecaab1e54 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -224,7 +224,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("xts", alg);
if (IS_ERR(inst))