Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug            |  45
-rw-r--r--  lib/crypto/Kconfig           |   3
-rw-r--r--  lib/crypto/Makefile          |   3
-rw-r--r--  lib/crypto/blake2s.c         |   8
-rw-r--r--  lib/crypto/chacha20poly1305.c |   8
-rw-r--r--  lib/crypto/curve25519.c      |   8
-rw-r--r--  lib/crypto/sm4.c             | 176
-rw-r--r--  lib/debugobjects.c           |   7
-rw-r--r--  lib/linear_ranges.c          |  31
-rw-r--r--  lib/mpi/mpiutil.c            |   2
-rw-r--r--  lib/string.c                 |  16
-rw-r--r--  lib/test_lockup.c            |   8
12 files changed, 265 insertions, 50 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5ddd575159fb..12b805dabbc9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1235,7 +1235,7 @@ config PROVE_LOCKING
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
+	select DEBUG_MUTEXES if !PREEMPT_RT
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_RWSEMS
 	select DEBUG_WW_MUTEX_SLOWPATH
@@ -1299,7 +1299,7 @@ config LOCK_STAT
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
+	select DEBUG_MUTEXES if !PREEMPT_RT
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	default n
@@ -1335,7 +1335,7 @@ config DEBUG_SPINLOCK
 
 config DEBUG_MUTEXES
 	bool "Mutex debugging: basic checks"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !PREEMPT_RT
 	help
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
@@ -1345,7 +1345,8 @@ config DEBUG_WW_MUTEX_SLOWPATH
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select DEBUG_LOCK_ALLOC
 	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
+	select DEBUG_MUTEXES if !PREEMPT_RT
+	select DEBUG_RT_MUTEXES if PREEMPT_RT
 	help
 	 This feature enables slowpath testing for w/w mutex users by
 	 injecting additional -EDEADLK wound/backoff cases. Together with
@@ -1368,7 +1369,7 @@ config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
+	select DEBUG_MUTEXES if !PREEMPT_RT
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select LOCKDEP
 	help
@@ -1679,33 +1680,6 @@ config DEBUG_WQ_FORCE_RR_CPU
 	  feature by default.  When enabled, memory and cache locality will
 	  be impacted.
 
-config DEBUG_BLOCK_EXT_DEVT
-	bool "Force extended block device numbers and spread them"
-	depends on DEBUG_KERNEL
-	depends on BLOCK
-	default n
-	help
-	  BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
-	  SOME DISTRIBUTIONS.  DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
-	  YOU ARE DOING.  Distros, please enable this and fix whatever
-	  is broken.
-
-	  Conventionally, block device numbers are allocated from
-	  predetermined contiguous area.  However, extended block area
-	  may introduce non-contiguous block device numbers.  This
-	  option forces most block device numbers to be allocated from
-	  the extended space and spreads them to discover kernel or
-	  userland code paths which assume predetermined contiguous
-	  device number allocation.
-
-	  Note that turning on this debug option shuffles all the
-	  device numbers for all IDE and SCSI devices including libata
-	  ones, so root partition specified using device number
-	  directly (via rdev or root=MAJ:MIN) won't work anymore.
-	  Textual device names (root=/dev/sdXn) will continue to work.
-
-	  Say N if you are unsure.
-
 config CPU_HOTPLUG_STATE_CONTROL
 	bool "Enable CPU hotplug state control"
 	depends on DEBUG_KERNEL
@@ -1971,6 +1945,13 @@ config FAIL_MMC_REQUEST
 	  and to test how the mmc host driver handles retries from
 	  the block device.
 
+config FAIL_SUNRPC
+	bool "Fault-injection capability for SunRPC"
+	depends on FAULT_INJECTION_DEBUG_FS && SUNRPC_DEBUG
+	help
+	  Provide fault-injection capability for SunRPC and
+	  its consumers.
+
 config FAULT_INJECTION_STACKTRACE_FILTER
 	bool "stacktrace filter for fault-injection capabilities"
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
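The new FAIL_SUNRPC switch only adds the Kconfig knob here; the actual hooks live in net/sunrpc and are outside this diff. For orientation, a consumer of the kernel's fault-injection framework typically follows the sketch below. This is a hedged illustration of the general pattern, not the sunrpc patch itself: fail_sunrpc_attr, fail_sunrpc_init and fail_sunrpc_this_request are made-up names.

#include <linux/fault-inject.h>
#include <linux/init.h>

/* Illustrative attribute; the real sunrpc one is defined in net/sunrpc. */
static DECLARE_FAULT_ATTR(fail_sunrpc_attr);

static int __init fail_sunrpc_init(void)
{
	/* Exposes probability/interval/times knobs under debugfs. */
	fault_create_debugfs_attr("fail_sunrpc", NULL, &fail_sunrpc_attr);
	return 0;
}
late_initcall(fail_sunrpc_init);

static bool fail_sunrpc_this_request(void)
{
	/* One go/no-go decision per request, driven by the knobs above. */
	return should_fail(&fail_sunrpc_attr, 1);
}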
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 14c032de276e..545ccbddf6a1 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -128,3 +128,6 @@ config CRYPTO_LIB_CHACHA20POLY1305
 
 config CRYPTO_LIB_SHA256
 	tristate
+
+config CRYPTO_LIB_SM4
+	tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 3a435629d9ce..73205ed269ba 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -38,6 +38,9 @@ libpoly1305-y	+= poly1305.o
 obj-$(CONFIG_CRYPTO_LIB_SHA256)	+= libsha256.o
 libsha256-y	:= sha256.o
 
+obj-$(CONFIG_CRYPTO_LIB_SM4)	+= libsm4.o
+libsm4-y	:= sm4.o
+
 ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
 libblake2s-y	+= blake2s-selftest.o
 libchacha20poly1305-y	+= chacha20poly1305-selftest.o
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index c64ac8bfb6a9..4055aa593ec4 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -73,7 +73,7 @@ void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
 }
 EXPORT_SYMBOL(blake2s256_hmac);
 
-static int __init mod_init(void)
+static int __init blake2s_mod_init(void)
 {
 	if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
 	    WARN_ON(!blake2s_selftest()))
@@ -81,12 +81,12 @@ static int __init mod_init(void)
 	return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit blake2s_mod_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("BLAKE2s hash function");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index c2fcdb98cc02..fa6a9440fc95 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -354,7 +354,7 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
 }
 EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
 
-static int __init mod_init(void)
+static int __init chacha20poly1305_init(void)
 {
 	if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
 	    WARN_ON(!chacha20poly1305_selftest()))
@@ -362,12 +362,12 @@ static int __init mod_init(void)
 	return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit chacha20poly1305_exit(void)
 {
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
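The mod_init/mod_exit renames in blake2s.c and chacha20poly1305.c above (and in curve25519.c below) share one motivation: a file-local name is legal C, but when several modules call their initcalls mod_init, tooling that prints symbol names, such as initcall_debug output and stack traces, shows identical entries for different modules. The convention the hunks adopt, sketched for a hypothetical foo module:

#include <linux/init.h>
#include <linux/module.h>

/* A unique prefix keeps initcall traces and stack dumps unambiguous. */
static int __init foo_mod_init(void)
{
	return 0;
}

static void __exit foo_mod_exit(void)
{
}

module_init(foo_mod_init);
module_exit(foo_mod_exit);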
Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c index fb29739e8c29..064b352c6907 100644 --- a/lib/crypto/curve25519.c +++ b/lib/crypto/curve25519.c @@ -13,7 +13,7 @@ #include <linux/module.h> #include <linux/init.h> -static int __init mod_init(void) +static int __init curve25519_init(void) { if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && WARN_ON(!curve25519_selftest())) @@ -21,12 +21,12 @@ static int __init mod_init(void) return 0; } -static void __exit mod_exit(void) +static void __exit curve25519_exit(void) { } -module_init(mod_init); -module_exit(mod_exit); +module_init(curve25519_init); +module_exit(curve25519_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Curve25519 scalar multiplication"); diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c new file mode 100644 index 000000000000..633b59fed9db --- /dev/null +++ b/lib/crypto/sm4.c @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4, as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2018 ARM Limited or its affiliates. + * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#include <linux/module.h> +#include <asm/unaligned.h> +#include <crypto/sm4.h> + +static const u32 fk[4] = { + 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc +}; + +static const u32 __cacheline_aligned ck[32] = { + 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, + 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9, + 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, + 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, + 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, + 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299, + 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, + 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 +}; + +static const u8 __cacheline_aligned sbox[256] = { + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 +}; + +static inline u32 sm4_t_non_lin_sub(u32 x) +{ + u32 out; + + out = (u32)sbox[x & 
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 9e14ae02306b..6946f8e204e3 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -557,7 +557,12 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	fill_pool();
+	/*
+	 * On RT enabled kernels the pool refill must happen in preemptible
+	 * context:
+	 */
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+		fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
index ced5c15d3f04..a1a7dfa881de 100644
--- a/lib/linear_ranges.c
+++ b/lib/linear_ranges.c
@@ -241,5 +241,36 @@ int linear_range_get_selector_high(const struct linear_range *r,
 }
 EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
 
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r:		pointer to linear range where selector is looked from
+ * @val:	value for which the selector is searched
+ * @selector:	address where found selector value is updated
+ *
+ * Return selector for which range value is closest match for given
+ * input value. Value is matching if it is equal or lower than given
+ * value. But return maximum selector if given value is higher than
+ * maximum value.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+				      unsigned int val, unsigned int *selector)
+{
+	if (r->min > val) {
+		*selector = r->min_sel;
+		return;
+	}
+
+	if (linear_range_get_max_value(r) < val) {
+		*selector = r->max_sel;
+		return;
+	}
+
+	if (r->step == 0)
+		*selector = r->min_sel;
+	else
+		*selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
 MODULE_DESCRIPTION("linear-ranges helper");
 MODULE_LICENSE("GPL");
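A worked usage sketch for the new helper, with a hypothetical regulator-style voltage range (field names match struct linear_range in <linux/linear_range.h>): because of the integer division, the helper rounds down to the closest selector whose value does not exceed the input, exactly as the kerneldoc above describes.

#include <linux/linear_range.h>

/* 1000 uV at selector 3, then 50 uV per step up to selector 11 (1400 uV). */
static const struct linear_range volt_range = {
	.min = 1000,
	.min_sel = 3,
	.max_sel = 11,
	.step = 50,
};

static unsigned int pick_selector(unsigned int uv)
{
	unsigned int sel;

	/* For uv = 1120: (1120 - 1000) / 50 + 3 = 5, i.e. 1100 <= 1120. */
	linear_range_get_selector_within(&volt_range, uv, &sel);
	return sel;
}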
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 9a75ca3f7edf..bc81419f400c 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
 		return 0;	/* no need to do it */
 
 	if (a->d) {
-		p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+		p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
 		if (!p)
 			return -ENOMEM;
 		memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
diff --git a/lib/string.c b/lib/string.c
index 77bd0b1d3296..b2de45a581f4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -29,6 +29,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <asm/unaligned.h>
 #include <asm/byteorder.h>
 #include <asm/word-at-a-time.h>
 #include <asm/page.h>
@@ -935,6 +936,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 	const unsigned char *su1, *su2;
 	int res = 0;
 
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (count >= sizeof(unsigned long)) {
+		const unsigned long *u1 = cs;
+		const unsigned long *u2 = ct;
+		do {
+			if (get_unaligned(u1) != get_unaligned(u2))
+				break;
+			u1++;
+			u2++;
+			count -= sizeof(unsigned long);
+		} while (count >= sizeof(unsigned long));
+		cs = u1;
+		ct = u2;
+	}
+#endif
 	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
 		if ((res = *su1 - *su2) != 0)
 			break;
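The memcmp() hunk compares word-sized chunks while they are equal and then lets the existing byte loop handle the first differing word plus any tail, so the byte-accurate return value is preserved. The same idea as a self-contained userspace sketch, where memcpy() stands in for get_unaligned(); CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is what makes those unaligned loads cheap in the kernel version:

#include <stddef.h>
#include <string.h>

/* Word-at-a-time memcmp sketch; mismatch resolution stays byte-wise. */
static int memcmp_words(const void *cs, const void *ct, size_t count)
{
	const unsigned char *s1 = cs, *s2 = ct;
	unsigned long a, b;

	while (count >= sizeof(unsigned long)) {
		memcpy(&a, s1, sizeof(a));
		memcpy(&b, s2, sizeof(b));
		if (a != b)
			break;	/* the byte loop pinpoints the difference */
		s1 += sizeof(a);
		s2 += sizeof(b);
		count -= sizeof(a);
	}
	for (; count; s1++, s2++, count--)
		if (*s1 != *s2)
			return *s1 - *s2;
	return 0;
}

On a 64-bit machine, two equal 16-byte buffers now take two word compares instead of sixteen byte compares.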
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 864554e76973..906b598740a7 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -485,13 +485,13 @@ static int __init test_lockup_init(void)
 		       offsetof(spinlock_t, lock.wait_lock.magic),
 		       SPINLOCK_MAGIC) ||
 	    test_magic(lock_rwlock_ptr,
-		       offsetof(rwlock_t, rtmutex.wait_lock.magic),
+		       offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
 		       SPINLOCK_MAGIC) ||
 	    test_magic(lock_mutex_ptr,
-		       offsetof(struct mutex, lock.wait_lock.magic),
+		       offsetof(struct mutex, rtmutex.wait_lock.magic),
 		       SPINLOCK_MAGIC) ||
 	    test_magic(lock_rwsem_ptr,
-		       offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+		       offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
 		       SPINLOCK_MAGIC))
 		return -EINVAL;
 #else
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
 		       offsetof(rwlock_t, magic),
 		       RWLOCK_MAGIC) ||
 	    test_magic(lock_mutex_ptr,
-		       offsetof(struct mutex, wait_lock.rlock.magic),
+		       offsetof(struct mutex, wait_lock.magic),
 		       SPINLOCK_MAGIC) ||
 	    test_magic(lock_rwsem_ptr,
 		       offsetof(struct rw_semaphore, wait_lock.magic),
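For context on the hunks above: test_magic() itself is not changed by this patch, only the offsetof() expressions fed to it, which track the relocated wait_lock inside the PREEMPT_RT lock types (rwbase.rtmutex for rwlock/rwsem, rtmutex for mutex). Conceptually the helper just reads the unsigned int stored at the given offset and compares it against the expected debug magic; a hypothetical reconstruction, assuming the in-tree version guards faulting addresses with get_kernel_nofault():

/* Hypothetical sketch of the helper called above, not the exact source. */
static bool test_magic(unsigned long addr, int offset, unsigned int expected)
{
	unsigned int magic = 0;

	if (!addr || get_kernel_nofault(magic, (unsigned int *)(addr + offset)))
		return false;

	return magic == expected;
}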