Diffstat (limited to 'lib')
38 files changed, 1557 insertions, 637 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 6a843639814f..55c27e1ec9cb 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -118,8 +118,18 @@ config INDIRECT_IOMEM_FALLBACK mmio accesses when the IO memory address is not a registered emulated region. +config TRACE_MMIO_ACCESS + bool "Register read/write tracing" + depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS + help + Create tracepoints for MMIO read/write operations. These trace events + can be used for logging all MMIO read/write operations. + source "lib/crypto/Kconfig" +config LIB_MEMNEQ + bool + config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2e24db4bff19..403071ff0bcf 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -498,7 +498,7 @@ config STACK_VALIDATION runtime stack traces are more reliable. For more information, see - tools/objtool/Documentation/stack-validation.txt. + tools/objtool/Documentation/objtool.txt. config NOINSTR_VALIDATION bool @@ -699,6 +699,14 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT help Debug objects boot parameter default value +config SHRINKER_DEBUG + bool "Enable shrinker debugging support" + depends on DEBUG_FS + help + Say Y to enable the shrinker debugfs interface which provides + visibility into the kernel memory shrinkers subsystem. + Disable it to avoid an extra memory footprint. + config HAVE_DEBUG_KMEMLEAK bool @@ -1560,7 +1568,7 @@ config DEBUG_KOBJECT_RELEASE help kobjects are reference counted objects. This means that their last reference count put is not predictable, and the kobject can - live on past the point at which a driver decides to drop it's + live on past the point at which a driver decides to drop its initial reference to the kobject gained on allocation. An example of this would be a struct device which has just been unregistered. diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index c4fe15d38b60..fd15230a703b 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -84,6 +84,9 @@ config UBSAN_SHIFT config UBSAN_DIV_ZERO bool "Perform checking for integer divide-by-zero" depends on $(cc-option,-fsanitize=integer-divide-by-zero) + # https://github.com/ClangBuiltLinux/linux/issues/1657 + # https://github.com/llvm/llvm-project/issues/56289 + depends on !CC_IS_CLANG help This option enables -fsanitize=integer-divide-by-zero which checks for integer division by zero. This is effectively redundant with the @@ -94,7 +97,7 @@ config UBSAN_UNREACHABLE bool "Perform checking for unreachable code" # objtool already handles unreachable checking and gets angry about # seeing UBSan instrumentation located in unreachable places. 
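(Aside on the UBSAN_DIV_ZERO hunk above: a minimal user-space illustration of the class of bug that -fsanitize=integer-divide-by-zero reports at run time. The file name, function and values are made up for the example; this is not kernel code.)

```c
/* Illustration only: build with
 *   cc -fsanitize=integer-divide-by-zero div.c
 * and the UBSan runtime reports the division below when divisor == 0
 * instead of letting it be silent undefined behaviour.
 */
#include <stdio.h>

static int scale(int value, int divisor)
{
        return value / divisor;         /* reported when divisor == 0 */
}

int main(void)
{
        printf("%d\n", scale(100, 0));
        return 0;
}
```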
- depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP)) + depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION)) depends on $(cc-option,-fsanitize=unreachable) help This option enables -fsanitize=unreachable which checks for control diff --git a/lib/Makefile b/lib/Makefile index ea54294d73bf..0eb8114600c1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,7 +29,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ - idr.o extable.o sha1.o irq_regs.o argv_split.o \ + idr.o extable.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ @@ -46,7 +46,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o rhashtable.o \ + percpu-refcount.o rhashtable.o base64.o \ once.o refcount.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o obj-$(CONFIG_STRING_SELFTEST) += test_string.o @@ -151,6 +151,8 @@ lib-y += logic_pio.o lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o +obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o + obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_BTREE) += btree.o @@ -251,6 +253,7 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o diff --git a/lib/base64.c b/lib/base64.c new file mode 100644 index 000000000000..b736a7a431c5 --- /dev/null +++ b/lib/base64.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * base64.c - RFC4648-compliant base64 encoding + * + * Copyright (c) 2020 Hannes Reinecke, SUSE + * + * Based on the base64url routines from fs/crypto/fname.c + * (which are using the URL-safe base64 encoding), + * modified to use the standard coding table from RFC4648 section 4. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/export.h> +#include <linux/string.h> +#include <linux/base64.h> + +static const char base64_table[65] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +/** + * base64_encode() - base64-encode some binary data + * @src: the binary data to encode + * @srclen: the length of @src in bytes + * @dst: (output) the base64-encoded string. Not NUL-terminated. + * + * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified + * by RFC 4648, including the '='-padding. + * + * Return: the length of the resulting base64-encoded string in bytes. + */ +int base64_encode(const u8 *src, int srclen, char *dst) +{ + u32 ac = 0; + int bits = 0; + int i; + char *cp = dst; + + for (i = 0; i < srclen; i++) { + ac = (ac << 8) | src[i]; + bits += 8; + do { + bits -= 6; + *cp++ = base64_table[(ac >> bits) & 0x3f]; + } while (bits >= 6); + } + if (bits) { + *cp++ = base64_table[(ac << (6 - bits)) & 0x3f]; + bits -= 6; + } + while (bits < 0) { + *cp++ = '='; + bits += 2; + } + return cp - dst; +} +EXPORT_SYMBOL_GPL(base64_encode); + +/** + * base64_decode() - base64-decode a string + * @src: the string to decode. Doesn't need to be NUL-terminated. + * @srclen: the length of @src in bytes + * @dst: (output) the decoded binary data + * + * Decodes a string using base64 encoding, i.e. 
the "Base 64 Encoding" + * specified by RFC 4648, including the '='-padding. + * + * This implementation hasn't been optimized for performance. + * + * Return: the length of the resulting decoded binary data in bytes, + * or -1 if the string isn't a valid base64 string. + */ +int base64_decode(const char *src, int srclen, u8 *dst) +{ + u32 ac = 0; + int bits = 0; + int i; + u8 *bp = dst; + + for (i = 0; i < srclen; i++) { + const char *p = strchr(base64_table, src[i]); + + if (src[i] == '=') { + ac = (ac << 6); + bits += 6; + if (bits >= 8) + bits -= 8; + continue; + } + if (p == NULL || src[i] == 0) + return -1; + ac = (ac << 6) | (p - base64_table); + bits += 6; + if (bits >= 8) { + bits -= 8; + *bp++ = (u8)(ac >> bits); + } + } + if (ac & ((1 << bits) - 1)) + return -1; + return bp - dst; +} +EXPORT_SYMBOL_GPL(base64_decode); diff --git a/lib/bitmap.c b/lib/bitmap.c index 0d5c2ece0bcb..b18e31ea6e66 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -45,19 +45,19 @@ * for the best explanations of this ordering. */ -int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] != bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; + return false; - return 1; + return true; } EXPORT_SYMBOL(__bitmap_equal); @@ -303,33 +303,33 @@ void __bitmap_replace(unsigned long *dst, } EXPORT_SYMBOL(__bitmap_replace); -int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & bitmap2[k]) - return 1; + return true; if (bits % BITS_PER_LONG) if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 1; - return 0; + return true; + return false; } EXPORT_SYMBOL(__bitmap_intersects); -int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & ~bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; - return 1; + return false; + return true; } EXPORT_SYMBOL(__bitmap_subset); @@ -527,33 +527,39 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal * bitmask and decimal list to userspace by sysfs ABI. * Drivers might be using a normal attribute for this kind of ABIs. A - * normal attribute typically has show entry as below: - * static ssize_t example_attribute_show(struct device *dev, + * normal attribute typically has show entry as below:: + * + * static ssize_t example_attribute_show(struct device *dev, * struct device_attribute *attr, char *buf) - * { + * { * ... * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max); - * } + * } + * * show entry of attribute has no offset and count parameters and this * means the file is limited to one page only. 
* bitmap_print_to_pagebuf() API works terribly well for this kind of - * normal attribute with buf parameter and without offset, count: - * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, + * normal attribute with buf parameter and without offset, count:: + * + * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, * int nmaskbits) - * { - * } + * { + * } + * * The problem is once we have a large bitmap, we have a chance to get a * bitmask or list more than one page. Especially for list, it could be * as complex as 0,3,5,7,9,... We have no simple way to know it exact size. * It turns out bin_attribute is a way to break this limit. bin_attribute - * has show entry as below: - * static ssize_t - * example_bin_attribute_show(struct file *filp, struct kobject *kobj, + * has show entry as below:: + * + * static ssize_t + * example_bin_attribute_show(struct file *filp, struct kobject *kobj, * struct bin_attribute *attr, char *buf, * loff_t offset, size_t count) - * { + * { * ... - * } + * } + * * With the new offset and count parameters, this makes sysfs ABI be able * to support file size more than one page. For example, offset could be * >= 4096. @@ -577,6 +583,7 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf(). * It is intended to workaround sysfs limitations discussed above and should be * used carefully in general case for the following reasons: + * * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf(). * - Memory complexity is O(nbits), comparing to O(1) for snprintf(). * - @off and @count are NOT offset and number of bits to print. @@ -1505,5 +1512,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits) buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); } EXPORT_SYMBOL(bitmap_to_arr32); +#endif + +#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) +/** + * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u64 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits) +{ + int n; + + for (n = nbits; n > 0; n -= 64) { + u64 val = *buf++; + *bitmap++ = val; + if (n > 32) + *bitmap++ = val >> 32; + } + + /* + * Clear tail bits in the last word beyond nbits. + * + * Negative index is OK because here we point to the word next + * to the last word of the bitmap, except for nbits == 0, which + * is tested implicitly. + */ + if (nbits % BITS_PER_LONG) + bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits); +} +EXPORT_SYMBOL(bitmap_from_arr64); + +/** + * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits + * @buf: array of u64 (in host byte order), the dest bitmap + * @bitmap: array of unsigned longs, the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) +{ + const unsigned long *end = bitmap + BITS_TO_LONGS(nbits); + + while (bitmap < end) { + *buf = *bitmap++; + if (bitmap < end) + *buf |= (u64)(*bitmap++) << 32; + buf++; + } + + /* Clear tail bits in the last element of array beyond nbits. 
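For the new bitmap_from_arr64() shown above, here is a user-space sketch of the word splitting it performs on the 32-bit big-endian builds where these out-of-line helpers are compiled: each u64 source word becomes two 32-bit bitmap words, low half first, and the tail of the last word is cleared. Types and the tail mask are simplified for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

/* Emulate BITS_PER_LONG == 32: every 64-bit source word is split into
 * two 32-bit bitmap words, mirroring the bitmap_from_arr64() loop.
 */
static void from_arr64_32(uint32_t *bitmap, const uint64_t *buf, unsigned int nbits)
{
        int n;

        for (n = nbits; n > 0; n -= 64) {
                uint64_t val = *buf++;

                *bitmap++ = (uint32_t)val;
                if (n > 32)
                        *bitmap++ = (uint32_t)(val >> 32);
        }
        if (nbits % 32)                         /* clear bits past nbits */
                bitmap[-1] &= ~0U >> (-nbits & 31);
}

int main(void)
{
        uint64_t src[1] = { 0x00000001deadbeefULL };
        uint32_t dst[2] = { 0 };

        from_arr64_32(dst, src, 33);
        printf("%08x %08x\n", dst[1], dst[0]);  /* 00000001 deadbeef */
        return 0;
}
```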
*/ + if (nbits % 64) + buf[-1] &= GENMASK_ULL(nbits % 64, 0); +} +EXPORT_SYMBOL(bitmap_to_arr64); #endif diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c index 1974b355c148..1d26a1647da5 100644 --- a/lib/crc-itu-t.c +++ b/lib/crc-itu-t.c @@ -7,7 +7,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ +/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 9856e291f414..9ff549f63540 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific @@ -120,6 +121,9 @@ config CRYPTO_LIB_CHACHA20POLY1305 select CRYPTO_LIB_POLY1305 select CRYPTO_ALGAPI +config CRYPTO_LIB_SHA1 + tristate + config CRYPTO_LIB_SHA256 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 26be2bbe09c5..919cbb2c220d 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -34,6 +34,9 @@ libpoly1305-y := poly1305-donna32.o libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o libpoly1305-y += poly1305.o +obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o +libsha1-y := sha1.o + obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 409e4b728770..7d77dea15587 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -4,6 +4,8 @@ */ #include <crypto/internal/blake2s.h> +#include <linux/kernel.h> +#include <linux/random.h> #include <linux/string.h> /* @@ -587,5 +589,44 @@ bool __init blake2s_selftest(void) } } + for (i = 0; i < 32; ++i) { + enum { TEST_ALIGNMENT = 16 }; + u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1] + __aligned(TEST_ALIGNMENT); + u8 blocks[BLAKE2S_BLOCK_SIZE * 2]; + struct blake2s_state state1, state2; + + get_random_bytes(blocks, sizeof(blocks)); + get_random_bytes(&state, sizeof(state)); + +#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \ + defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) + memcpy(&state1, &state, sizeof(state1)); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE); + blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress self-test %d: FAIL\n", + i + 1); + success = false; + } +#endif + + memcpy(&state1, &state, sizeof(state1)); + blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE); + for (l = 1; l < TEST_ALIGNMENT; ++l) { + memcpy(unaligned_block + l, blocks, + BLAKE2S_BLOCK_SIZE); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state2, unaligned_block + l, 1, + BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress align %d self-test %d: FAIL\n", + l, i + 1); + success = false; + } + } + } + return success; } diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index c71c09621c09..98e688c6d891 100644 --- 
a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -16,16 +16,44 @@ #include <linux/init.h> #include <linux/bug.h> +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { - __blake2s_update(state, in, inlen, false); + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - __blake2s_final(state, out, false); + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); @@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void) return 0; } -static void __exit blake2s_mod_exit(void) -{ -} - module_init(blake2s_mod_init); -module_exit(blake2s_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/sha1.c b/lib/crypto/sha1.c index 0494766fc574..1aebe7be9401 100644 --- a/lib/sha1.c +++ b/lib/crypto/sha1.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/bitops.h> #include <linux/string.h> #include <crypto/sha1.h> @@ -135,3 +136,5 @@ void sha1_init(__u32 *buf) buf[4] = 0xc3d2e1f0; } EXPORT_SYMBOL(sha1_init); + +MODULE_LICENSE("GPL"); diff --git a/lib/idr.c b/lib/idr.c index f4ab4f4aa3c7..7ecdfdb5309e 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id) struct ida_bitmap *bitmap; unsigned long flags; - BUG_ON((int)id < 0); + if ((int)id < 0) + return; xas_lock_irqsave(&xas, flags); bitmap = xas_load(&xas); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 6dd5330f7a99..0e0be334dbee 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -168,174 +168,6 @@ static int copyin(void *to, const void __user *from, size_t n) return n; } -static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - size_t skip, copy, left, wanted; - const struct iovec *iov; - char __user *buf; - void *kaddr, *from; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - might_fault(); - wanted = bytes; - iov = i->iov; - skip = i->iov_offset; - buf = iov->iov_base + skip; - copy = min(bytes, iov->iov_len - skip); - - if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) { - kaddr = kmap_atomic(page); - from = kaddr + offset; - - /* first chunk, usually the only one */ - left = copyout(buf, from, copy); - copy -= left; - skip += copy; - from += copy; - bytes -= copy; - - while (unlikely(!left && bytes)) { - iov++; - buf = 
iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyout(buf, from, copy); - copy -= left; - skip = copy; - from += copy; - bytes -= copy; - } - if (likely(!bytes)) { - kunmap_atomic(kaddr); - goto done; - } - offset = from - kaddr; - buf += copy; - kunmap_atomic(kaddr); - copy = min(bytes, iov->iov_len - skip); - } - /* Too bad - revert to non-atomic kmap */ - - kaddr = kmap(page); - from = kaddr + offset; - left = copyout(buf, from, copy); - copy -= left; - skip += copy; - from += copy; - bytes -= copy; - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyout(buf, from, copy); - copy -= left; - skip = copy; - from += copy; - bytes -= copy; - } - kunmap(page); - -done: - if (skip == iov->iov_len) { - iov++; - skip = 0; - } - i->count -= wanted - bytes; - i->nr_segs -= iov - i->iov; - i->iov = iov; - i->iov_offset = skip; - return wanted - bytes; -} - -static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - size_t skip, copy, left, wanted; - const struct iovec *iov; - char __user *buf; - void *kaddr, *to; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - might_fault(); - wanted = bytes; - iov = i->iov; - skip = i->iov_offset; - buf = iov->iov_base + skip; - copy = min(bytes, iov->iov_len - skip); - - if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) { - kaddr = kmap_atomic(page); - to = kaddr + offset; - - /* first chunk, usually the only one */ - left = copyin(to, buf, copy); - copy -= left; - skip += copy; - to += copy; - bytes -= copy; - - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyin(to, buf, copy); - copy -= left; - skip = copy; - to += copy; - bytes -= copy; - } - if (likely(!bytes)) { - kunmap_atomic(kaddr); - goto done; - } - offset = to - kaddr; - buf += copy; - kunmap_atomic(kaddr); - copy = min(bytes, iov->iov_len - skip); - } - /* Too bad - revert to non-atomic kmap */ - - kaddr = kmap(page); - to = kaddr + offset; - left = copyin(to, buf, copy); - copy -= left; - skip += copy; - to += copy; - bytes -= copy; - while (unlikely(!left && bytes)) { - iov++; - buf = iov->iov_base; - copy = min(bytes, iov->iov_len); - left = copyin(to, buf, copy); - copy -= left; - skip = copy; - to += copy; - bytes -= copy; - } - kunmap(page); - -done: - if (skip == iov->iov_len) { - iov++; - skip = 0; - } - i->count -= wanted - bytes; - i->nr_segs -= iov - i->iov; - i->iov = iov; - i->iov_offset = skip; - return wanted - bytes; -} - #ifdef PIPE_PARANOIA static bool sanity(const struct iov_iter *i) { @@ -689,6 +521,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, struct pipe_inode_info *pipe = i->pipe; unsigned int p_mask = pipe->ring_size - 1; unsigned int i_head; + unsigned int valid = pipe->head; size_t n, off, xfer = 0; if (!sanity(i)) @@ -702,11 +535,17 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); chunk -= rem; kunmap_local(p); - i->head = i_head; - i->iov_offset = off + chunk; - xfer += chunk; - if (rem) + if (chunk) { + i->head = i_head; + i->iov_offset = off + chunk; + xfer += chunk; + valid = i_head + 1; + } + if (rem) { + pipe->bufs[i_head & p_mask].len -= rem; + pipe_discard_from(pipe, valid); break; + } n -= chunk; off = 0; i_head++; @@ -848,24 +687,14 @@ static inline bool page_copy_sane(struct page *page, size_t offset, 
size_t n) static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { - if (likely(iter_is_iovec(i))) - return copy_page_to_iter_iovec(page, offset, bytes, i); - if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { + if (unlikely(iov_iter_is_pipe(i))) { + return copy_page_to_iter_pipe(page, offset, bytes, i); + } else { void *kaddr = kmap_local_page(page); size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); kunmap_local(kaddr); return wanted; } - if (iov_iter_is_pipe(i)) - return copy_page_to_iter_pipe(page, offset, bytes, i); - if (unlikely(iov_iter_is_discard(i))) { - if (unlikely(i->count < bytes)) - bytes = i->count; - i->count -= bytes; - return bytes; - } - WARN_ON(1); - return 0; } size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, @@ -896,17 +725,12 @@ EXPORT_SYMBOL(copy_page_to_iter); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { - if (unlikely(!page_copy_sane(page, offset, bytes))) - return 0; - if (likely(iter_is_iovec(i))) - return copy_page_from_iter_iovec(page, offset, bytes, i); - if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { + if (page_copy_sane(page, offset, bytes)) { void *kaddr = kmap_local_page(page); size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); kunmap_local(kaddr); return wanted; } - WARN_ON(1); return 0; } EXPORT_SYMBOL(copy_page_from_iter); @@ -1029,17 +853,22 @@ static void pipe_advance(struct iov_iter *i, size_t size) static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { - struct bvec_iter bi; + const struct bio_vec *bvec, *end; - bi.bi_size = i->count; - bi.bi_bvec_done = i->iov_offset; - bi.bi_idx = 0; - bvec_iter_advance(i->bvec, &bi, size); + if (!i->count) + return; + i->count -= size; + + size += i->iov_offset; - i->bvec += bi.bi_idx; - i->nr_segs -= bi.bi_idx; - i->count = bi.bi_size; - i->iov_offset = bi.bi_bvec_done; + for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { + if (likely(size < bvec->bv_len)) + break; + size -= bvec->bv_len; + } + i->iov_offset = size; + i->nr_segs -= bvec - i->bvec; + i->bvec = bvec; } static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) @@ -1268,6 +1097,98 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) } EXPORT_SYMBOL(iov_iter_discard); +static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + size_t size = i->count; + size_t skip = i->iov_offset; + unsigned k; + + for (k = 0; k < i->nr_segs; k++, skip = 0) { + size_t len = i->iov[k].iov_len - skip; + + if (len > size) + len = size; + if (len & len_mask) + return false; + if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) + return false; + + size -= len; + if (!size) + break; + } + return true; +} + +static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + size_t size = i->count; + unsigned skip = i->iov_offset; + unsigned k; + + for (k = 0; k < i->nr_segs; k++, skip = 0) { + size_t len = i->bvec[k].bv_len - skip; + + if (len > size) + len = size; + if (len & len_mask) + return false; + if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) + return false; + + size -= len; + if (!size) + break; + } + return true; +} + +/** + * iov_iter_is_aligned() - Check if the addresses and lengths of each segments + * are aligned to the parameters. 
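The iov_iter_aligned_iovec()/iov_iter_aligned_bvec() helpers added above reduce to a per-segment mask test on each segment's start address and length. A simplified user-space sketch of that check over a plain (base, len) array follows; it omits the iov_offset/count clamping the kernel code does and is purely illustrative.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
        void   *base;
        size_t  len;
};

/* True only if every segment's start address and length satisfy the
 * masks, e.g. addr_mask = len_mask = 511 for 512-byte-granular I/O.
 */
static bool segs_aligned(const struct seg *s, unsigned int nsegs,
                         unsigned int addr_mask, unsigned int len_mask)
{
        unsigned int k;

        for (k = 0; k < nsegs; k++) {
                if (((uintptr_t)s[k].base & addr_mask) || (s[k].len & len_mask))
                        return false;
        }
        return true;
}

int main(void)
{
        static char buf[4096] __attribute__((aligned(512)));
        struct seg ok  = { buf, 1024 };
        struct seg bad = { buf + 8, 1024 };

        printf("%d %d\n", segs_aligned(&ok, 1, 511, 511),
               segs_aligned(&bad, 1, 511, 511));        /* 1 0 */
        return 0;
}
```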
+ * + * @i: &struct iov_iter to restore + * @addr_mask: bit mask to check against the iov element's addresses + * @len_mask: bit mask to check against the iov element's lengths + * + * Return: false if any addresses or lengths intersect with the provided masks + */ +bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, + unsigned len_mask) +{ + if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) + return iov_iter_aligned_iovec(i, addr_mask, len_mask); + + if (iov_iter_is_bvec(i)) + return iov_iter_aligned_bvec(i, addr_mask, len_mask); + + if (iov_iter_is_pipe(i)) { + unsigned int p_mask = i->pipe->ring_size - 1; + size_t size = i->count; + + if (size & len_mask) + return false; + if (size && allocated(&i->pipe->bufs[i->head & p_mask])) { + if (i->iov_offset & addr_mask) + return false; + } + + return true; + } + + if (iov_iter_is_xarray(i)) { + if (i->count & len_mask) + return false; + if ((i->xarray_start + i->iov_offset) & addr_mask) + return false; + } + + return true; +} +EXPORT_SYMBOL_GPL(iov_iter_is_aligned); + static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) { unsigned long res = 0; @@ -1434,7 +1355,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, { unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size || !maxpages) @@ -1461,57 +1382,40 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } /* must be done on non-empty ITER_IOVEC one */ -static unsigned long first_iovec_segment(const struct iov_iter *i, - size_t *size, size_t *start, - size_t maxsize, unsigned maxpages) +static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) { size_t skip; long k; for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { - unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; size_t len = i->iov[k].iov_len - skip; if (unlikely(!len)) continue; - if (len > maxsize) - len = maxsize; - len += (*start = addr % PAGE_SIZE); - if (len > maxpages * PAGE_SIZE) - len = maxpages * PAGE_SIZE; - *size = len; - return addr & PAGE_MASK; + if (*size > len) + *size = len; + return (unsigned long)i->iov[k].iov_base + skip; } BUG(); // if it had been empty, we wouldn't get called } /* must be done on non-empty ITER_BVEC one */ static struct page *first_bvec_segment(const struct iov_iter *i, - size_t *size, size_t *start, - size_t maxsize, unsigned maxpages) + size_t *size, size_t *start) { struct page *page; size_t skip = i->iov_offset, len; len = i->bvec->bv_len - skip; - if (len > maxsize) - len = maxsize; + if (*size > len) + *size = len; skip += i->bvec->bv_offset; page = i->bvec->bv_page + skip / PAGE_SIZE; - len += (*start = skip % PAGE_SIZE); - if (len > maxpages * PAGE_SIZE) - len = maxpages * PAGE_SIZE; - *size = len; + *start = skip % PAGE_SIZE; return page; } @@ -1519,13 +1423,14 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) { - size_t len; int n, res; if (maxsize > i->count) maxsize = i->count; if (!maxsize) return 0; + if (maxsize > MAX_RW_COUNT) + maxsize = MAX_RW_COUNT; if (likely(iter_is_iovec(i))) { unsigned int gup_flags = 0; @@ -1536,21 +1441,27 @@ ssize_t iov_iter_get_pages(struct 
iov_iter *i, if (i->nofault) gup_flags |= FOLL_NOFAULT; - addr = first_iovec_segment(i, &len, start, maxsize, maxpages); - n = DIV_ROUND_UP(len, PAGE_SIZE); + addr = first_iovec_segment(i, &maxsize); + *start = addr % PAGE_SIZE; + addr &= PAGE_MASK; + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); + if (n > maxpages) + n = maxpages; res = get_user_pages_fast(addr, n, gup_flags, pages); if (unlikely(res <= 0)) return res; - return (res == n ? len : res * PAGE_SIZE) - *start; + return min_t(size_t, maxsize, res * PAGE_SIZE - *start); } if (iov_iter_is_bvec(i)) { struct page *page; - page = first_bvec_segment(i, &len, start, maxsize, maxpages); - n = DIV_ROUND_UP(len, PAGE_SIZE); - while (n--) + page = first_bvec_segment(i, &maxsize, start); + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); + if (n > maxpages) + n = maxpages; + for (int k = 0; k < n; k++) get_page(*pages++ = page++); - return len - *start; + return min_t(size_t, maxsize, n * PAGE_SIZE - *start); } if (iov_iter_is_pipe(i)) return pipe_get_pages(i, pages, maxsize, maxpages, start); @@ -1602,7 +1513,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, struct page **p; unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size) @@ -1631,13 +1542,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, @@ -1645,13 +1550,14 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, size_t *start) { struct page **p; - size_t len; int n, res; if (maxsize > i->count) maxsize = i->count; if (!maxsize) return 0; + if (maxsize > MAX_RW_COUNT) + maxsize = MAX_RW_COUNT; if (likely(iter_is_iovec(i))) { unsigned int gup_flags = 0; @@ -1662,8 +1568,10 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, if (i->nofault) gup_flags |= FOLL_NOFAULT; - addr = first_iovec_segment(i, &len, start, maxsize, ~0U); - n = DIV_ROUND_UP(len, PAGE_SIZE); + addr = first_iovec_segment(i, &maxsize); + *start = addr % PAGE_SIZE; + addr &= PAGE_MASK; + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); p = get_pages_array(n); if (!p) return -ENOMEM; @@ -1674,19 +1582,19 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, return res; } *pages = p; - return (res == n ? len : res * PAGE_SIZE) - *start; + return min_t(size_t, maxsize, res * PAGE_SIZE - *start); } if (iov_iter_is_bvec(i)) { struct page *page; - page = first_bvec_segment(i, &len, start, maxsize, ~0U); - n = DIV_ROUND_UP(len, PAGE_SIZE); + page = first_bvec_segment(i, &maxsize, start); + n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); *pages = p = get_pages_array(n); if (!p) return -ENOMEM; - while (n--) + for (int k = 0; k < n; k++) get_page(*p++ = page++); - return len - *start; + return min_t(size_t, maxsize, n * PAGE_SIZE - *start); } if (iov_iter_is_pipe(i)) return pipe_get_pages_alloc(i, pages, maxsize, start); diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 96f96e42ce06..5e223327196a 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -9,8 +9,8 @@ * These symbols point to the .kunit_test_suites section and are defined in * include/asm-generic/vmlinux.lds.h, and consequently must be extern. 
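The executor rework in the hunks that follow drops the per-file "subsuite" arrays and treats __kunit_suites_start/__kunit_suites_end as one flat [start, end) range of suite pointers, where end points one past the last entry. A small user-space sketch of walking such a range; the stub type and names are made up for the example.

```c
#include <stdio.h>

struct suite_stub {                     /* stand-in for struct kunit_suite */
        const char *name;
};

/* Walk a flat [start, end) range of suite pointers, the layout the
 * executor now uses instead of NULL-terminated per-file arrays.
 */
static void list_suites(struct suite_stub **start, struct suite_stub **end)
{
        struct suite_stub **p;

        for (p = start; p < end; p++)
                printf("%s\n", (*p)->name);
}

int main(void)
{
        struct suite_stub a = { "suite1" }, b = { "suite2" };
        struct suite_stub *suites[] = { &a, &b };

        list_suites(suites, suites + 2);
        return 0;
}
```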
*/ -extern struct kunit_suite * const * const __kunit_suites_start[]; -extern struct kunit_suite * const * const __kunit_suites_end[]; +extern struct kunit_suite * const __kunit_suites_start[]; +extern struct kunit_suite * const __kunit_suites_end[]; #if IS_BUILTIN(CONFIG_KUNIT) @@ -55,7 +55,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed, /* Create a copy of suite with only tests that match test_glob. */ static struct kunit_suite * -kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) +kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob) { int n = 0; struct kunit_case *filtered, *test_case; @@ -69,15 +69,15 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) if (n == 0) return NULL; - /* Use memcpy to workaround copy->name being const. */ - copy = kmalloc(sizeof(*copy), GFP_KERNEL); + copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL); if (!copy) return ERR_PTR(-ENOMEM); - memcpy(copy, suite, sizeof(*copy)); filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL); - if (!filtered) + if (!filtered) { + kfree(copy); return ERR_PTR(-ENOMEM); + } n = 0; kunit_suite_for_each_test_case(suite, test_case) { @@ -92,62 +92,18 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) static char *kunit_shutdown; core_param(kunit_shutdown, kunit_shutdown, charp, 0644); -static struct kunit_suite * const * -kunit_filter_subsuite(struct kunit_suite * const * const subsuite, - struct kunit_test_filter *filter) -{ - int i, n = 0; - struct kunit_suite **filtered, *filtered_suite; - - n = 0; - for (i = 0; subsuite[i]; ++i) { - if (glob_match(filter->suite_glob, subsuite[i]->name)) - ++n; - } - - if (n == 0) - return NULL; - - filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL); - if (!filtered) - return ERR_PTR(-ENOMEM); - - n = 0; - for (i = 0; subsuite[i] != NULL; ++i) { - if (!glob_match(filter->suite_glob, subsuite[i]->name)) - continue; - filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob); - if (IS_ERR(filtered_suite)) - return ERR_CAST(filtered_suite); - else if (filtered_suite) - filtered[n++] = filtered_suite; - } - filtered[n] = NULL; - - return filtered; -} - +/* Stores an array of suites, end points one past the end */ struct suite_set { - struct kunit_suite * const * const *start; - struct kunit_suite * const * const *end; + struct kunit_suite * const *start; + struct kunit_suite * const *end; }; -static void kunit_free_subsuite(struct kunit_suite * const *subsuite) -{ - unsigned int i; - - for (i = 0; subsuite[i]; i++) - kfree(subsuite[i]); - - kfree(subsuite); -} - static void kunit_free_suite_set(struct suite_set suite_set) { - struct kunit_suite * const * const *suites; + struct kunit_suite * const *suites; for (suites = suite_set.start; suites < suite_set.end; suites++) - kunit_free_subsuite(*suites); + kfree(*suites); kfree(suite_set.start); } @@ -156,7 +112,7 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, int *err) { int i; - struct kunit_suite * const **copy, * const *filtered_subsuite; + struct kunit_suite **copy, *filtered_suite; struct suite_set filtered; struct kunit_test_filter filter; @@ -171,14 +127,19 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, kunit_parse_filter_glob(&filter, filter_glob); - for (i = 0; i < max; ++i) { - filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter); - if (IS_ERR(filtered_subsuite)) { - *err = 
PTR_ERR(filtered_subsuite); + for (i = 0; &suite_set->start[i] != suite_set->end; i++) { + if (!glob_match(filter.suite_glob, suite_set->start[i]->name)) + continue; + + filtered_suite = kunit_filter_tests(suite_set->start[i], filter.test_glob); + if (IS_ERR(filtered_suite)) { + *err = PTR_ERR(filtered_suite); return filtered; } - if (filtered_subsuite) - *copy++ = filtered_subsuite; + if (!filtered_suite) + continue; + + *copy++ = filtered_suite; } filtered.end = copy; @@ -201,52 +162,33 @@ static void kunit_handle_shutdown(void) } -static void kunit_print_tap_header(struct suite_set *suite_set) -{ - struct kunit_suite * const * const *suites, * const *subsuite; - int num_of_suites = 0; - - for (suites = suite_set->start; suites < suite_set->end; suites++) - for (subsuite = *suites; *subsuite != NULL; subsuite++) - num_of_suites++; - - pr_info("TAP version 14\n"); - pr_info("1..%d\n", num_of_suites); -} - static void kunit_exec_run_tests(struct suite_set *suite_set) { - struct kunit_suite * const * const *suites; + size_t num_suites = suite_set->end - suite_set->start; - kunit_print_tap_header(suite_set); + pr_info("TAP version 14\n"); + pr_info("1..%zu\n", num_suites); - for (suites = suite_set->start; suites < suite_set->end; suites++) - __kunit_test_suites_init(*suites); + __kunit_test_suites_init(suite_set->start, num_suites); } static void kunit_exec_list_tests(struct suite_set *suite_set) { - unsigned int i; - struct kunit_suite * const * const *suites; + struct kunit_suite * const *suites; struct kunit_case *test_case; /* Hack: print a tap header so kunit.py can find the start of KUnit output. */ pr_info("TAP version 14\n"); for (suites = suite_set->start; suites < suite_set->end; suites++) - for (i = 0; (*suites)[i] != NULL; i++) { - kunit_suite_for_each_test_case((*suites)[i], test_case) { - pr_info("%s.%s\n", (*suites)[i]->name, test_case->name); - } + kunit_suite_for_each_test_case((*suites), test_case) { + pr_info("%s.%s\n", (*suites)->name, test_case->name); } } int kunit_run_all_tests(void) { - struct suite_set suite_set = { - .start = __kunit_suites_start, - .end = __kunit_suites_end, - }; + struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end}; int err = 0; if (filter_glob_param) { @@ -264,11 +206,10 @@ int kunit_run_all_tests(void) else pr_err("kunit executor: unknown action '%s'\n", action_param); - if (filter_glob_param) { /* a copy was made of each array */ + if (filter_glob_param) { /* a copy was made of each suite */ kunit_free_suite_set(suite_set); } - out: kunit_handle_shutdown(); return err; diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index eac6ff480273..0cea31c27b23 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -9,8 +9,6 @@ #include <kunit/test.h> static void kfree_at_end(struct kunit *test, const void *to_free); -static void free_subsuite_at_end(struct kunit *test, - struct kunit_suite *const *to_free); static struct kunit_suite *alloc_fake_suite(struct kunit *test, const char *suite_name, struct kunit_case *test_cases); @@ -41,126 +39,80 @@ static void parse_filter_test(struct kunit *test) kfree(filter.test_glob); } -static void filter_subsuite_test(struct kunit *test) +static void filter_suites_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "suite2", - .test_glob = NULL, - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = 
subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); /* Want: suite1, suite2, NULL -> suite2, NULL */ - filtered = kunit_filter_subsuite(subsuite, &filter); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered); - free_subsuite_at_end(test, filtered); + got = kunit_filter_suites(&suite_set, "suite2", &err); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* Validate we just have suite2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2"); - KUNIT_EXPECT_FALSE(test, filtered[1]); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2"); + + /* Contains one element (end is 1 past end) */ + KUNIT_ASSERT_EQ(test, got.end - got.start, 1); } -static void filter_subsuite_test_glob_test(struct kunit *test) +static void filter_suites_test_glob_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "suite2", - .test_glob = "test2", - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); /* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */ - filtered = kunit_filter_subsuite(subsuite, &filter); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered); - free_subsuite_at_end(test, filtered); + got = kunit_filter_suites(&suite_set, "suite2.test2", &err); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* Validate we just have suite2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2"); - KUNIT_EXPECT_FALSE(test, filtered[1]); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2"); + KUNIT_ASSERT_EQ(test, got.end - got.start, 1); /* Now validate we just have test2 */ - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]->test_cases); - KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->test_cases[0].name, "test2"); - KUNIT_EXPECT_FALSE(test, filtered[0]->test_cases[1].name); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases); + KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2"); + KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name); } -static void filter_subsuite_to_empty_test(struct kunit *test) +static void filter_suites_to_empty_test(struct kunit *test) { - struct kunit_suite *subsuite[3] = {NULL, NULL, NULL}; - struct kunit_suite * const *filtered; - struct kunit_test_filter filter = { - .suite_glob = "not_found", - .test_glob = NULL, - }; + struct kunit_suite *subsuite[3] = {NULL, NULL}; + struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]}; + struct suite_set got; + int err = 0; subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases); subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases); - filtered = kunit_filter_subsuite(subsuite, &filter); - free_subsuite_at_end(test, filtered); /* just in case */ + got = 
kunit_filter_suites(&suite_set, "not_found", &err); + KUNIT_ASSERT_EQ(test, err, 0); + kfree_at_end(test, got.start); /* just in case */ - KUNIT_EXPECT_FALSE_MSG(test, filtered, - "should be NULL to indicate no match"); -} - -static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_set) -{ - struct kunit_suite * const * const *suites; - - kfree_at_end(test, suite_set->start); - for (suites = suite_set->start; suites < suite_set->end; suites++) - free_subsuite_at_end(test, *suites); -} - -static void filter_suites_test(struct kunit *test) -{ - /* Suites per-file are stored as a NULL terminated array */ - struct kunit_suite *subsuites[2][2] = { - {NULL, NULL}, - {NULL, NULL}, - }; - /* Match the memory layout of suite_set */ - struct kunit_suite * const * const suites[2] = { - subsuites[0], subsuites[1], - }; - - const struct suite_set suite_set = { - .start = suites, - .end = suites + 2, - }; - struct suite_set filtered = {.start = NULL, .end = NULL}; - int err = 0; - - /* Emulate two files, each having one suite */ - subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases); - subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases); - - /* Filter out suite1 */ - filtered = kunit_filter_suites(&suite_set, "suite0", &err); - kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */ - KUNIT_EXPECT_EQ(test, err, 0); - KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1); - - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]); - KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0"); + KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end, + "should be empty to indicate no match"); } static struct kunit_case executor_test_cases[] = { KUNIT_CASE(parse_filter_test), - KUNIT_CASE(filter_subsuite_test), - KUNIT_CASE(filter_subsuite_test_glob_test), - KUNIT_CASE(filter_subsuite_to_empty_test), KUNIT_CASE(filter_suites_test), + KUNIT_CASE(filter_suites_test_glob_test), + KUNIT_CASE(filter_suites_to_empty_test), {} }; @@ -190,20 +142,6 @@ static void kfree_at_end(struct kunit *test, const void *to_free) (void *)to_free); } -static void free_subsuite_res_free(struct kunit_resource *res) -{ - kunit_free_subsuite(res->data); -} - -static void free_subsuite_at_end(struct kunit *test, - struct kunit_suite *const *to_free) -{ - if (IS_ERR_OR_NULL(to_free)) - return; - kunit_alloc_resource(test, NULL, free_subsuite_res_free, - GFP_KERNEL, (void *)to_free); -} - static struct kunit_suite *alloc_fake_suite(struct kunit *test, const char *suite_name, struct kunit_case *test_cases) diff --git a/lib/kunit/test.c b/lib/kunit/test.c index a5053a07409f..b73d5bb5c473 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -10,7 +10,9 @@ #include <kunit/test.h> #include <kunit/test-bug.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/panic.h> #include <linux/sched/debug.h> #include <linux/sched.h> @@ -501,6 +503,9 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats suite_stats = { 0 }; struct kunit_result_stats total_stats = { 0 }; + /* Taint the kernel so we know we've run tests. 
*/ + add_taint(TAINT_TEST, LOCKDEP_STILL_OK); + if (suite->suite_init) { suite->suite_init_err = suite->suite_init(suite); if (suite->suite_init_err) { @@ -581,11 +586,11 @@ static void kunit_init_suite(struct kunit_suite *suite) suite->suite_init_err = 0; } -int __kunit_test_suites_init(struct kunit_suite * const * const suites) +int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites) { unsigned int i; - for (i = 0; suites[i] != NULL; i++) { + for (i = 0; i < num_suites; i++) { kunit_init_suite(suites[i]); kunit_run_tests(suites[i]); } @@ -598,17 +603,54 @@ static void kunit_exit_suite(struct kunit_suite *suite) kunit_debugfs_destroy_suite(suite); } -void __kunit_test_suites_exit(struct kunit_suite **suites) +void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites) { unsigned int i; - for (i = 0; suites[i] != NULL; i++) + for (i = 0; i < num_suites; i++) kunit_exit_suite(suites[i]); kunit_suite_counter = 1; } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); +#ifdef CONFIG_MODULES +static void kunit_module_init(struct module *mod) +{ + __kunit_test_suites_init(mod->kunit_suites, mod->num_kunit_suites); +} + +static void kunit_module_exit(struct module *mod) +{ + __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites); +} + +static int kunit_module_notify(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct module *mod = data; + + switch (val) { + case MODULE_STATE_LIVE: + kunit_module_init(mod); + break; + case MODULE_STATE_GOING: + kunit_module_exit(mod); + break; + case MODULE_STATE_COMING: + case MODULE_STATE_UNFORMED: + break; + } + + return 0; +} + +static struct notifier_block kunit_mod_nb = { + .notifier_call = kunit_module_notify, + .priority = 0, +}; +#endif + struct kunit_kmalloc_array_params { size_t n; size_t size; @@ -703,13 +745,19 @@ EXPORT_SYMBOL_GPL(kunit_cleanup); static int __init kunit_init(void) { kunit_debugfs_init(); - +#ifdef CONFIG_MODULES + return register_module_notifier(&kunit_mod_nb); +#else return 0; +#endif } late_initcall(kunit_init); static void __exit kunit_exit(void) { +#ifdef CONFIG_MODULES + unregister_module_notifier(&kunit_mod_nb); +#endif kunit_debugfs_cleanup(); } module_exit(kunit_exit); diff --git a/lib/lockref.c b/lib/lockref.c index c6f0b183b937..45e93ece8ba0 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -111,31 +111,6 @@ int lockref_put_not_zero(struct lockref *lockref) EXPORT_SYMBOL(lockref_put_not_zero); /** - * lockref_get_or_lock - Increments count unless the count is 0 or dead - * @lockref: pointer to lockref structure - * Return: 1 if count updated successfully or 0 if count was zero - * and we got the lock instead. - */ -int lockref_get_or_lock(struct lockref *lockref) -{ - CMPXCHG_LOOP( - new.count++; - if (old.count <= 0) - break; - , - return 1; - ); - - spin_lock(&lockref->lock); - if (lockref->count <= 0) - return 0; - lockref->count++; - spin_unlock(&lockref->lock); - return 1; -} -EXPORT_SYMBOL(lockref_get_or_lock); - -/** * lockref_put_return - Decrement reference count if possible * @lockref: pointer to lockref structure * diff --git a/lib/memneq.c b/lib/memneq.c new file mode 100644 index 000000000000..fb11608b1ec1 --- /dev/null +++ b/lib/memneq.c @@ -0,0 +1,176 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan <james@openvpn.net> + * Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <crypto/algapi.h> +#include <asm/unaligned.h> + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned long *)(a + 8)) ^ + get_unaligned((unsigned long *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= get_unaligned((unsigned int *)a) ^ + get_unaligned((unsigned int *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 4)) ^ + get_unaligned((unsigned int *)(b + 4)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 8)) ^ + get_unaligned((unsigned int *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 12)) ^ + get_unaligned((unsigned int *)(b + 12)); + OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
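To make the timing argument behind the helper concrete: crypto_memneq() accumulates XOR differences over the whole buffer instead of returning at the first mismatch, so the run time does not reveal where two MACs diverge. Below is a byte-wise user-space sketch of the idea; the kernel version additionally uses word-sized loads and OPTIMIZER_HIDE_VAR(), which the volatile here only loosely approximates.

```c
#include <stddef.h>
#include <stdio.h>

/* Accumulate XOR differences over the whole buffer so the time taken
 * does not depend on where (or whether) the first mismatch occurs.
 */
static unsigned long memneq_sketch(const void *a, const void *b, size_t size)
{
        const unsigned char *pa = a, *pb = b;
        volatile unsigned long neq = 0;

        while (size--)
                neq |= *pa++ ^ *pb++;
        return neq;             /* 0 means "equal", non-zero means "differs" */
}

int main(void)
{
        unsigned char mac1[16] = { 1, 2, 3 };
        unsigned char mac2[16] = { 1, 2, 4 };

        printf("%s\n", memneq_sketch(mac1, mac2, 16) ? "differ" : "equal");
        printf("%s\n", memneq_sketch(mac1, mac1, 16) ? "differ" : "equal");
        return 0;
}
```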
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c index 2cdae54c1bd0..9056fc5167fc 100644 --- a/lib/mpi/mpi-add.c +++ b/lib/mpi/mpi-add.c @@ -138,7 +138,7 @@ void mpi_sub(MPI w, MPI u, MPI v) mpi_add(w, u, vv); mpi_free(vv); } - +EXPORT_SYMBOL_GPL(mpi_sub); void mpi_addm(MPI w, MPI u, MPI v, MPI m) { diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c index 8f5fa200f297..7f4eda8560dc 100644 --- a/lib/mpi/mpi-mul.c +++ b/lib/mpi/mpi-mul.c @@ -82,6 +82,7 @@ void mpi_mul(MPI w, MPI u, MPI v) if (tmp_limb) mpi_free_limb_space(tmp_limb); } +EXPORT_SYMBOL_GPL(mpi_mul); void mpi_mulm(MPI w, MPI u, MPI v, MPI m) { diff --git a/lib/nodemask.c b/lib/nodemask.c index 3aa454c54c0d..e22647f5181b 100644 --- a/lib/nodemask.c +++ b/lib/nodemask.c @@ -3,9 +3,9 @@ #include <linux/module.h> #include <linux/random.h> -int __next_node_in(int node, const nodemask_t *srcp) +unsigned int __next_node_in(int node, const nodemask_t *srcp) { - int ret = __next_node(node, srcp); + unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 475f0c064bf6..7e3e43679b73 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -91,6 +91,7 @@ DEFINE_TEST_ARRAY(u32) = { {-4U, 5U, 1U, -9U, -20U, true, false, true}, }; +#if BITS_PER_LONG == 64 DEFINE_TEST_ARRAY(u64) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, @@ -114,6 +115,7 @@ DEFINE_TEST_ARRAY(u64) = { false, true, false}, {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, }; +#endif DEFINE_TEST_ARRAY(s8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -188,6 +190,8 @@ DEFINE_TEST_ARRAY(s32) = { {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, }; + +#if BITS_PER_LONG == 64 DEFINE_TEST_ARRAY(s64) = { {0, 0, 0, 0, 0, false, false, false}, @@ -216,6 +220,7 @@ DEFINE_TEST_ARRAY(s64) = { {-128, -1, -129, -127, 128, false, false, false}, {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, }; +#endif #define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ t _r; \ @@ -650,6 +655,7 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(s16_overflow_test), KUNIT_CASE(u32_overflow_test), KUNIT_CASE(s32_overflow_test), +/* Clang 13 and earlier generate unwanted libcalls on 32-bit. 
*/ #if BITS_PER_LONG == 64 KUNIT_CASE(u64_overflow_test), KUNIT_CASE(s64_overflow_test), diff --git a/lib/sbitmap.c b/lib/sbitmap.c index ae4fd4de9ebe..29eb0484215a 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, sbitmap_deferred_clear(map); if (map->word == (1UL << (map_depth - 1)) - 1) - continue; + goto next; nr = find_first_zero_bit(&map->word, map_depth); if (nr + nr_tags <= map_depth) { @@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, get_mask = ((1UL << map_tags) - 1) << nr; do { val = READ_ONCE(map->word); + if ((val & ~get_mask) != val) + goto next; ret = atomic_long_cmpxchg(ptr, val, get_mask | val); } while (ret != val); get_mask = (get_mask & ~ret) >> nr; @@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, return get_mask; } } +next: /* Jump to next index. */ if (++index >= sb->map_nr) index = 0; diff --git a/lib/siphash.c b/lib/siphash.c index 71d315a6ad62..15bc5b6f368c 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -1,6 +1,5 @@ -/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * This file is provided under a dual BSD/GPLv2 license. +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * SipHash: a fast short-input PRF * https://131002.net/siphash/ diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0c82f07f74fc..d5923a640457 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -585,6 +585,30 @@ static void __init test_bitmap_arr32(void) } } +static void __init test_bitmap_arr64(void) +{ + unsigned int nbits, next_bit; + u64 arr[EXP1_IN_BITS / 64]; + DECLARE_BITMAP(bmap2, EXP1_IN_BITS); + + memset(arr, 0xa5, sizeof(arr)); + + for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { + memset(bmap2, 0xff, sizeof(arr)); + bitmap_to_arr64(arr, exp1, nbits); + bitmap_from_arr64(bmap2, arr, nbits); + expect_eq_bitmap(bmap2, exp1, nbits); + + next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); + if (next_bit < round_up(nbits, BITS_PER_LONG)) + pr_err("bitmap_copy_arr64(nbits == %d:" + " tail is not safely cleared: %d\n", nbits, next_bit); + + if (nbits < EXP1_IN_BITS - 64) + expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); + } +} + static void noinline __init test_mem_optimisations(void) { DECLARE_BITMAP(bmap1, 1024); @@ -852,6 +876,7 @@ static void __init selftest(void) test_copy(); test_replace(); test_bitmap_arr32(); + test_bitmap_arr64(); test_bitmap_parse(); test_bitmap_parselist(); test_bitmap_printlist(); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 2a7836e115b4..5820704165a6 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -14733,9 +14733,9 @@ static struct skb_segment_test skb_segment_tests[] __initconst = { .build_skb = build_test_skb_linear_no_head_frag, .features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO | - NETIF_F_LLTX_BIT | NETIF_F_GRO | + NETIF_F_LLTX | NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_STAG_TX_BIT + NETIF_F_HW_VLAN_STAG_TX } }; diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 1bccd6cd5f48..c82b65947ce6 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -31,9 +31,12 @@ MODULE_IMPORT_NS(TEST_FIRMWARE); #define TEST_FIRMWARE_NAME "test-firmware.bin" #define TEST_FIRMWARE_NUM_REQS 4 #define TEST_FIRMWARE_BUF_SIZE 
SZ_1K +#define TEST_UPLOAD_MAX_SIZE SZ_2K +#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */ static DEFINE_MUTEX(test_fw_mutex); static const struct firmware *test_firmware; +static LIST_HEAD(test_upload_list); struct test_batched_req { u8 idx; @@ -63,6 +66,7 @@ struct test_batched_req { * @reqs: stores all requests information * @read_fw_idx: index of thread from which we want to read firmware results * from through the read_fw trigger. + * @upload_name: firmware name to be used with upload_read sysfs node * @test_result: a test may use this to collect the result from the call * of the request_firmware*() calls used in their tests. In order of * priority we always keep first any setup error. If no setup errors were @@ -101,6 +105,7 @@ struct test_config { bool send_uevent; u8 num_requests; u8 read_fw_idx; + char *upload_name; /* * These below don't belong her but we'll move them once we create @@ -112,8 +117,34 @@ struct test_config { struct device *device); }; +struct upload_inject_err { + const char *prog; + enum fw_upload_err err_code; +}; + +struct test_firmware_upload { + char *name; + struct list_head node; + char *buf; + size_t size; + bool cancel_request; + struct upload_inject_err inject; + struct fw_upload *fwl; +}; + static struct test_config *test_fw_config; +static struct test_firmware_upload *upload_lookup_name(const char *name) +{ + struct test_firmware_upload *tst; + + list_for_each_entry(tst, &test_upload_list, node) + if (strncmp(name, tst->name, strlen(tst->name)) == 0) + return tst; + + return NULL; +} + static ssize_t test_fw_misc_read(struct file *f, char __user *buf, size_t size, loff_t *offset) { @@ -198,6 +229,7 @@ static int __test_firmware_config_init(void) test_fw_config->req_firmware = request_firmware; test_fw_config->test_result = 0; test_fw_config->reqs = NULL; + test_fw_config->upload_name = NULL; return 0; @@ -277,6 +309,13 @@ static ssize_t config_show(struct device *dev, test_fw_config->sync_direct ? 
"true" : "false"); len += scnprintf(buf + len, PAGE_SIZE - len, "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx); + if (test_fw_config->upload_name) + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\t%s\n", + test_fw_config->upload_name); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\tEMTPY\n"); mutex_unlock(&test_fw_mutex); @@ -392,6 +431,32 @@ static ssize_t config_name_show(struct device *dev, } static DEVICE_ATTR_RW(config_name); +static ssize_t config_upload_name_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (tst) + test_fw_config->upload_name = tst->name; + else + ret = -EINVAL; + mutex_unlock(&test_fw_mutex); + + return ret; +} + +static ssize_t config_upload_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return config_test_show_str(buf, test_fw_config->upload_name); +} +static DEVICE_ATTR_RW(config_upload_name); + static ssize_t config_num_requests_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -989,6 +1054,278 @@ out: } static DEVICE_ATTR_WO(trigger_batched_requests_async); +static void upload_release(struct test_firmware_upload *tst) +{ + firmware_upload_unregister(tst->fwl); + kfree(tst->buf); + kfree(tst->name); + kfree(tst); +} + +static void upload_release_all(void) +{ + struct test_firmware_upload *tst, *tmp; + + list_for_each_entry_safe(tst, tmp, &test_upload_list, node) { + list_del(&tst->node); + upload_release(tst); + } + test_fw_config->upload_name = NULL; +} + +/* + * This table is replicated from .../firmware_loader/sysfs_upload.c + * and needs to be kept in sync. 
+ */ +static const char * const fw_upload_err_str[] = { + [FW_UPLOAD_ERR_NONE] = "none", + [FW_UPLOAD_ERR_HW_ERROR] = "hw-error", + [FW_UPLOAD_ERR_TIMEOUT] = "timeout", + [FW_UPLOAD_ERR_CANCELED] = "user-abort", + [FW_UPLOAD_ERR_BUSY] = "device-busy", + [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size", + [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error", + [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout", +}; + +static void upload_err_inject_error(struct test_firmware_upload *tst, + const u8 *p, const char *prog) +{ + enum fw_upload_err err; + + for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) { + if (strncmp(p, fw_upload_err_str[err], + strlen(fw_upload_err_str[err])) == 0) { + tst->inject.prog = prog; + tst->inject.err_code = err; + return; + } + } +} + +static void upload_err_inject_prog(struct test_firmware_upload *tst, + const u8 *p) +{ + static const char * const progs[] = { + "preparing:", "transferring:", "programming:" + }; + int i; + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (strncmp(p, progs[i], strlen(progs[i])) == 0) { + upload_err_inject_error(tst, p + strlen(progs[i]), + progs[i]); + return; + } + } +} + +#define FIVE_MINUTES_MS (5 * 60 * 1000) +static enum fw_upload_err +fw_upload_wait_on_cancel(struct test_firmware_upload *tst) +{ + int ms_delay; + + for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) { + msleep(100); + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + } + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl, + const u8 *data, u32 size) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + enum fw_upload_err ret = FW_UPLOAD_ERR_NONE; + const char *progress = "preparing:"; + + tst->cancel_request = false; + + if (!size || size > TEST_UPLOAD_MAX_SIZE) { + ret = FW_UPLOAD_ERR_INVALID_SIZE; + goto err_out; + } + + if (strncmp(data, "inject:", strlen("inject:")) == 0) + upload_err_inject_prog(tst, data + strlen("inject:")); + + memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE); + tst->size = size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + ret = fw_upload_wait_on_cancel(tst); + else + ret = tst->inject.err_code; + +err_out: + /* + * The cleanup op only executes if the prepare op succeeds. + * If the prepare op fails, it must do its own clean-up.
+ */ + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; + + return ret; +} + +static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl, + const u8 *data, u32 offset, + u32 size, u32 *written) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "transferring:"; + u32 blk_size; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size); + memcpy(tst->buf + offset, data + offset, blk_size); + + *written = blk_size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "programming:"; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static void test_fw_upload_cancel(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->cancel_request = true; +} + +static void test_fw_cleanup(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; +} + +static const struct fw_upload_ops upload_test_ops = { + .prepare = test_fw_upload_prepare, + .write = test_fw_upload_write, + .poll_complete = test_fw_upload_complete, + .cancel = test_fw_upload_cancel, + .cleanup = test_fw_cleanup +}; + +static ssize_t upload_register_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + struct fw_upload *fwl; + char *name; + int ret; + + name = kstrndup(buf, count, GFP_KERNEL); + if (!name) + return -ENOMEM; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(name); + if (tst) { + ret = -EEXIST; + goto free_name; + } + + tst = kzalloc(sizeof(*tst), GFP_KERNEL); + if (!tst) { + ret = -ENOMEM; + goto free_name; + } + + tst->name = name; + tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL); + if (!tst->buf) { + ret = -ENOMEM; + goto free_tst; + } + + fwl = firmware_upload_register(THIS_MODULE, dev, tst->name, + &upload_test_ops, tst); + if (IS_ERR(fwl)) { + ret = PTR_ERR(fwl); + goto free_buf; + } + + tst->fwl = fwl; + list_add_tail(&tst->node, &test_upload_list); + mutex_unlock(&test_fw_mutex); + return count; + +free_buf: + kfree(tst->buf); + +free_tst: + kfree(tst); + +free_name: + mutex_unlock(&test_fw_mutex); + kfree(name); + + return ret; +} +static DEVICE_ATTR_WO(upload_register); + +static ssize_t upload_unregister_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (!tst) { + ret = -EINVAL; + goto out; + } + + if (test_fw_config->upload_name == tst->name) + test_fw_config->upload_name = NULL; + + list_del(&tst->node); + upload_release(tst); + +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_WO(upload_unregister); + static ssize_t 
test_result_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1051,6 +1388,45 @@ out: } static DEVICE_ATTR_RO(read_firmware); +static ssize_t upload_read_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct test_firmware_upload *tst = NULL; + struct test_firmware_upload *tst_iter; + int ret = -EINVAL; + + if (!test_fw_config->upload_name) { + pr_err("Set config_upload_name before using upload_read\n"); + return -EINVAL; + } + + mutex_lock(&test_fw_mutex); + list_for_each_entry(tst_iter, &test_upload_list, node) + if (tst_iter->name == test_fw_config->upload_name) { + tst = tst_iter; + break; + } + + if (!tst) { + pr_err("Firmware name not found: %s\n", + test_fw_config->upload_name); + goto out; + } + + if (tst->size > PAGE_SIZE) { + pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); + goto out; + } + + memcpy(buf, tst->buf, tst->size); + ret = tst->size; +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_RO(upload_read); + #define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr static struct attribute *test_dev_attrs[] = { @@ -1066,6 +1442,7 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(config_sync_direct), TEST_FW_DEV_ATTR(config_send_uevent), TEST_FW_DEV_ATTR(config_read_fw_idx), + TEST_FW_DEV_ATTR(config_upload_name), /* These don't use the config at all - they could be ported! */ TEST_FW_DEV_ATTR(trigger_request), @@ -1082,6 +1459,9 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(release_all_firmware), TEST_FW_DEV_ATTR(test_result), TEST_FW_DEV_ATTR(read_firmware), + TEST_FW_DEV_ATTR(upload_read), + TEST_FW_DEV_ATTR(upload_register), + TEST_FW_DEV_ATTR(upload_unregister), NULL, }; @@ -1128,6 +1508,7 @@ static void __exit test_firmware_exit(void) mutex_lock(&test_fw_mutex); release_firmware(test_firmware); misc_deregister(&test_fw_misc_device); + upload_release_all(); __test_firmware_config_free(); kfree(test_fw_config); mutex_unlock(&test_fw_mutex); diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c index 25ae1ac2624a..9ebf6f5549f3 100644 --- a/lib/test_free_pages.c +++ b/lib/test_free_pages.c @@ -17,7 +17,7 @@ static void test_free_pages(gfp_t gfp) for (i = 0; i < 1000 * 1000; i++) { unsigned long addr = __get_free_pages(gfp, 3); - struct page *page = virt_to_page(addr); + struct page *page = virt_to_page((void *)addr); /* Simulate page cache getting a speculative reference */ get_page(page); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index cfe632047839..e3965cafd27c 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -32,11 +32,32 @@ #include "test_hmm_uapi.h" -#define DMIRROR_NDEVICES 2 +#define DMIRROR_NDEVICES 4 #define DMIRROR_RANGE_FAULT_TIMEOUT 1000 #define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U) #define DEVMEM_CHUNKS_RESERVE 16 +/* + * For device_private pages, dpage is just a dummy struct page + * representing a piece of device memory. dmirror_devmem_alloc_page + * allocates a real system memory page as backing storage to fake a + * real device. zone_device_data points to that backing page. But + * for device_coherent memory, the struct page represents real + * physical CPU-accessible memory that we can use directly. + */ +#define BACKING_PAGE(page) (is_device_private_page((page)) ? \ + (page)->zone_device_data : (page)) + +static unsigned long spm_addr_dev0; +module_param(spm_addr_dev0, long, 0644); +MODULE_PARM_DESC(spm_addr_dev0, + "Specify start address for SPM (special purpose memory) used for device 0. 
By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE."); + +static unsigned long spm_addr_dev1; +module_param(spm_addr_dev1, long, 0644); +MODULE_PARM_DESC(spm_addr_dev1, + "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE."); + static const struct dev_pagemap_ops dmirror_devmem_ops; static const struct mmu_interval_notifier_ops dmirror_min_ops; static dev_t dmirror_dev; @@ -87,6 +108,7 @@ struct dmirror_chunk { struct dmirror_device { struct cdev cdevice; struct hmm_devmem *devmem; + unsigned int zone_device_type; unsigned int devmem_capacity; unsigned int devmem_count; @@ -114,6 +136,21 @@ static int dmirror_bounce_init(struct dmirror_bounce *bounce, return 0; } +static bool dmirror_is_private_zone(struct dmirror_device *mdevice) +{ + return (mdevice->zone_device_type == + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false; +} + +static enum migrate_vma_direction +dmirror_select_device(struct dmirror *dmirror) +{ + return (dmirror->mdevice->zone_device_type == + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? + MIGRATE_VMA_SELECT_DEVICE_PRIVATE : + MIGRATE_VMA_SELECT_DEVICE_COHERENT; +} + static void dmirror_bounce_fini(struct dmirror_bounce *bounce) { vfree(bounce->ptr); @@ -454,28 +491,44 @@ fini: return ret; } -static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, +static int dmirror_allocate_chunk(struct dmirror_device *mdevice, struct page **ppage) { struct dmirror_chunk *devmem; - struct resource *res; + struct resource *res = NULL; unsigned long pfn; unsigned long pfn_first; unsigned long pfn_last; void *ptr; + int ret = -ENOMEM; devmem = kzalloc(sizeof(*devmem), GFP_KERNEL); if (!devmem) - return false; + return ret; - res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, - "hmm_dmirror"); - if (IS_ERR(res)) + switch (mdevice->zone_device_type) { + case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE: + res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, + "hmm_dmirror"); + if (IS_ERR_OR_NULL(res)) + goto err_devmem; + devmem->pagemap.range.start = res->start; + devmem->pagemap.range.end = res->end; + devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; + break; + case HMM_DMIRROR_MEMORY_DEVICE_COHERENT: + devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ? 
+ spm_addr_dev0 : + spm_addr_dev1; + devmem->pagemap.range.end = devmem->pagemap.range.start + + DEVMEM_CHUNK_SIZE - 1; + devmem->pagemap.type = MEMORY_DEVICE_COHERENT; + break; + default: + ret = -EINVAL; goto err_devmem; + } - devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; - devmem->pagemap.range.start = res->start; - devmem->pagemap.range.end = res->end; devmem->pagemap.nr_range = 1; devmem->pagemap.ops = &dmirror_devmem_ops; devmem->pagemap.owner = mdevice; @@ -496,10 +549,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, mdevice->devmem_capacity = new_capacity; mdevice->devmem_chunks = new_chunks; } - ptr = memremap_pages(&devmem->pagemap, numa_node_id()); - if (IS_ERR(ptr)) + if (IS_ERR_OR_NULL(ptr)) { + if (ptr) + ret = PTR_ERR(ptr); + else + ret = -EFAULT; goto err_release; + } devmem->mdevice = mdevice; pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; @@ -528,30 +585,35 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, } spin_unlock(&mdevice->lock); - return true; + return 0; err_release: mutex_unlock(&mdevice->devmem_lock); - release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range)); + if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) + release_mem_region(devmem->pagemap.range.start, + range_len(&devmem->pagemap.range)); err_devmem: kfree(devmem); - return false; + return ret; } static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) { struct page *dpage = NULL; - struct page *rpage; + struct page *rpage = NULL; /* - * This is a fake device so we alloc real system memory to store - * our device memory. + * For ZONE_DEVICE private type, this is a fake device so we allocate + * real system memory to store our device memory. + * For ZONE_DEVICE coherent type we use the actual dpage to store the + * data and ignore rpage. */ - rpage = alloc_page(GFP_HIGHUSER); - if (!rpage) - return NULL; - + if (dmirror_is_private_zone(mdevice)) { + rpage = alloc_page(GFP_HIGHUSER); + if (!rpage) + return NULL; + } spin_lock(&mdevice->lock); if (mdevice->free_pages) { @@ -561,7 +623,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) spin_unlock(&mdevice->lock); } else { spin_unlock(&mdevice->lock); - if (!dmirror_allocate_chunk(mdevice, &dpage)) + if (dmirror_allocate_chunk(mdevice, &dpage)) goto error; } @@ -570,7 +632,8 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) return dpage; error: - __free_page(rpage); + if (rpage) + __free_page(rpage); return NULL; } @@ -596,12 +659,16 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, * unallocated pte_none() or read-only zero page. 
*/ spage = migrate_pfn_to_page(*src); + if (WARN(spage && is_zone_device_page(spage), + "page already in device spage pfn: 0x%lx\n", + page_to_pfn(spage))) + continue; dpage = dmirror_devmem_alloc_page(mdevice); if (!dpage) continue; - rpage = dpage->zone_device_data; + rpage = BACKING_PAGE(dpage); if (spage) copy_highpage(rpage, spage); else @@ -615,6 +682,8 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, */ rpage->zone_device_data = dmirror; + pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n", + page_to_pfn(spage), page_to_pfn(dpage)); *dst = migrate_pfn(page_to_pfn(dpage)); if ((*src & MIGRATE_PFN_WRITE) || (!spage && args->vma->vm_flags & VM_WRITE)) @@ -692,11 +761,7 @@ static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, if (!dpage) continue; - /* - * Store the page that holds the data so the page table - * doesn't have to deal with ZONE_DEVICE private pages. - */ - entry = dpage->zone_device_data; + entry = BACKING_PAGE(dpage); if (*dst & MIGRATE_PFN_WRITE) entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); @@ -732,7 +797,7 @@ static int dmirror_exclusive(struct dmirror *dmirror, mmap_read_lock(mm); for (addr = start; addr < end; addr = next) { - unsigned long mapped; + unsigned long mapped = 0; int i; if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT)) @@ -741,7 +806,13 @@ static int dmirror_exclusive(struct dmirror *dmirror, next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT); ret = make_device_exclusive_range(mm, addr, next, pages, NULL); - mapped = dmirror_atomic_map(addr, next, pages, dmirror); + /* + * Do dmirror_atomic_map() iff all pages are marked for + * exclusive access to avoid accessing uninitialized + * fields of pages. + */ + if (ret == (next - addr) >> PAGE_SHIFT) + mapped = dmirror_atomic_map(addr, next, pages, dmirror); for (i = 0; i < ret; i++) { if (pages[i]) { unlock_page(pages[i]); @@ -776,15 +847,126 @@ static int dmirror_exclusive(struct dmirror *dmirror, return ret; } -static int dmirror_migrate(struct dmirror *dmirror, - struct hmm_dmirror_cmd *cmd) +static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, + struct dmirror *dmirror) +{ + const unsigned long *src = args->src; + unsigned long *dst = args->dst; + unsigned long start = args->start; + unsigned long end = args->end; + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE, + src++, dst++) { + struct page *dpage, *spage; + + spage = migrate_pfn_to_page(*src); + if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) + continue; + + if (WARN_ON(!is_device_private_page(spage) && + !is_device_coherent_page(spage))) + continue; + spage = BACKING_PAGE(spage); + dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); + if (!dpage) + continue; + pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n", + page_to_pfn(spage), page_to_pfn(dpage)); + + lock_page(dpage); + xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); + copy_highpage(dpage, spage); + *dst = migrate_pfn(page_to_pfn(dpage)); + if (*src & MIGRATE_PFN_WRITE) + *dst |= MIGRATE_PFN_WRITE; + } + return 0; +} + +static unsigned long +dmirror_successful_migrated_pages(struct migrate_vma *migrate) +{ + unsigned long cpages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + cpages++; + } + return cpages; +} + +static int dmirror_migrate_to_system(struct dmirror *dmirror, + struct hmm_dmirror_cmd 
*cmd) { unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; struct vm_area_struct *vma; - unsigned long src_pfns[64]; - unsigned long dst_pfns[64]; + unsigned long src_pfns[64] = { 0 }; + unsigned long dst_pfns[64] = { 0 }; + struct migrate_vma args; + unsigned long next; + int ret; + + start = cmd->addr; + end = start + size; + if (end < start) + return -EINVAL; + + /* Since the mm is for the mirrored process, get a reference first. */ + if (!mmget_not_zero(mm)) + return -EINVAL; + + cmd->cpages = 0; + mmap_read_lock(mm); + for (addr = start; addr < end; addr = next) { + vma = vma_lookup(mm, addr); + if (!vma || !(vma->vm_flags & VM_READ)) { + ret = -EINVAL; + goto out; + } + next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); + if (next > vma->vm_end) + next = vma->vm_end; + + args.vma = vma; + args.src = src_pfns; + args.dst = dst_pfns; + args.start = addr; + args.end = next; + args.pgmap_owner = dmirror->mdevice; + args.flags = dmirror_select_device(dmirror); + + ret = migrate_vma_setup(&args); + if (ret) + goto out; + + pr_debug("Migrating from device mem to sys mem\n"); + dmirror_devmem_fault_alloc_and_copy(&args, dmirror); + + migrate_vma_pages(&args); + cmd->cpages += dmirror_successful_migrated_pages(&args); + migrate_vma_finalize(&args); + } +out: + mmap_read_unlock(mm); + mmput(mm); + + return ret; +} + +static int dmirror_migrate_to_device(struct dmirror *dmirror, + struct hmm_dmirror_cmd *cmd) +{ + unsigned long start, end, addr; + unsigned long size = cmd->npages << PAGE_SHIFT; + struct mm_struct *mm = dmirror->notifier.mm; + struct vm_area_struct *vma; + unsigned long src_pfns[64] = { 0 }; + unsigned long dst_pfns[64] = { 0 }; struct dmirror_bounce bounce; struct migrate_vma args; unsigned long next; @@ -821,6 +1003,7 @@ static int dmirror_migrate(struct dmirror *dmirror, if (ret) goto out; + pr_debug("Migrating from sys mem to device mem\n"); dmirror_migrate_alloc_and_copy(&args, dmirror); migrate_vma_pages(&args); dmirror_migrate_finalize_and_map(&args, dmirror); @@ -829,7 +1012,10 @@ static int dmirror_migrate(struct dmirror *dmirror, mmap_read_unlock(mm); mmput(mm); - /* Return the migrated data for verification. */ + /* + * Return the migrated data for verification. + * Only for pages in device zone + */ ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; @@ -872,6 +1058,12 @@ static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL; else *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE; + } else if (is_device_coherent_page(page)) { + /* Is the page migrated to this device or some other? 
*/ + if (dmirror->mdevice == dmirror_page_to_device(page)) + *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL; + else + *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE; } else if (is_zero_pfn(page_to_pfn(page))) *perm = HMM_DMIRROR_PROT_ZERO; else @@ -1059,8 +1251,12 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp, ret = dmirror_write(dmirror, &cmd); break; - case HMM_DMIRROR_MIGRATE: - ret = dmirror_migrate(dmirror, &cmd); + case HMM_DMIRROR_MIGRATE_TO_DEV: + ret = dmirror_migrate_to_device(dmirror, &cmd); + break; + + case HMM_DMIRROR_MIGRATE_TO_SYS: + ret = dmirror_migrate_to_system(dmirror, &cmd); break; case HMM_DMIRROR_EXCLUSIVE: @@ -1122,14 +1318,13 @@ static const struct file_operations dmirror_fops = { static void dmirror_devmem_free(struct page *page) { - struct page *rpage = page->zone_device_data; + struct page *rpage = BACKING_PAGE(page); struct dmirror_device *mdevice; - if (rpage) + if (rpage != page) __free_page(rpage); mdevice = dmirror_page_to_device(page); - spin_lock(&mdevice->lock); mdevice->cfree++; page->zone_device_data = mdevice->free_pages; @@ -1137,43 +1332,11 @@ static void dmirror_devmem_free(struct page *page) spin_unlock(&mdevice->lock); } -static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, - struct dmirror *dmirror) -{ - const unsigned long *src = args->src; - unsigned long *dst = args->dst; - unsigned long start = args->start; - unsigned long end = args->end; - unsigned long addr; - - for (addr = start; addr < end; addr += PAGE_SIZE, - src++, dst++) { - struct page *dpage, *spage; - - spage = migrate_pfn_to_page(*src); - if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) - continue; - spage = spage->zone_device_data; - - dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); - if (!dpage) - continue; - - lock_page(dpage); - xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); - copy_highpage(dpage, spage); - *dst = migrate_pfn(page_to_pfn(dpage)); - if (*src & MIGRATE_PFN_WRITE) - *dst |= MIGRATE_PFN_WRITE; - } - return 0; -} - static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) { struct migrate_vma args; - unsigned long src_pfns; - unsigned long dst_pfns; + unsigned long src_pfns = 0; + unsigned long dst_pfns = 0; struct page *rpage; struct dmirror *dmirror; vm_fault_t ret; @@ -1193,7 +1356,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) args.src = &src_pfns; args.dst = &dst_pfns; args.pgmap_owner = dmirror->mdevice; - args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; + args.flags = dmirror_select_device(dmirror); if (migrate_vma_setup(&args)) return VM_FAULT_SIGBUS; @@ -1231,10 +1394,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id) if (ret) return ret; - /* Build a list of free ZONE_DEVICE private struct pages */ - dmirror_allocate_chunk(mdevice, NULL); - - return 0; + /* Build a list of free ZONE_DEVICE struct pages */ + return dmirror_allocate_chunk(mdevice, NULL); } static void dmirror_device_remove(struct dmirror_device *mdevice) @@ -1247,8 +1408,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice) mdevice->devmem_chunks[i]; memunmap_pages(&devmem->pagemap); - release_mem_region(devmem->pagemap.range.start, - range_len(&devmem->pagemap.range)); + if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) + release_mem_region(devmem->pagemap.range.start, + range_len(&devmem->pagemap.range)); kfree(devmem); } kfree(mdevice->devmem_chunks); @@ -1260,14 +1422,26 @@ static void dmirror_device_remove(struct dmirror_device *mdevice) static int __init 
hmm_dmirror_init(void) { int ret; - int id; + int id = 0; + int ndevices = 0; ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES, "HMM_DMIRROR"); if (ret) goto err_unreg; - for (id = 0; id < DMIRROR_NDEVICES; id++) { + memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0])); + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; + if (spm_addr_dev0 && spm_addr_dev1) { + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_COHERENT; + dmirror_devices[ndevices++].zone_device_type = + HMM_DMIRROR_MEMORY_DEVICE_COHERENT; + } + for (id = 0; id < ndevices; id++) { ret = dmirror_device_init(dmirror_devices + id, id); if (ret) goto err_chrdev; @@ -1289,7 +1463,8 @@ static void __exit hmm_dmirror_exit(void) int id; for (id = 0; id < DMIRROR_NDEVICES; id++) - dmirror_device_remove(dmirror_devices + id); + if (dmirror_devices[id].zone_device_type) + dmirror_device_remove(dmirror_devices + id); unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); } diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h index f14dea5dcd06..e31d58c9034a 100644 --- a/lib/test_hmm_uapi.h +++ b/lib/test_hmm_uapi.h @@ -31,10 +31,11 @@ struct hmm_dmirror_cmd { /* Expose the address space of the calling process through hmm device file */ #define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd) #define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_MIGRATE _IOWR('H', 0x02, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x03, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x04, struct hmm_dmirror_cmd) -#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_MIGRATE_TO_DEV _IOWR('H', 0x02, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_MIGRATE_TO_SYS _IOWR('H', 0x03, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) +#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd) /* * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT. @@ -49,6 +50,8 @@ struct hmm_dmirror_cmd { * device the ioctl() is made * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some * other device + * HMM_DMIRROR_PROT_DEV_COHERENT: Migrate device coherent page on the device + * the ioctl() is made */ enum { HMM_DMIRROR_PROT_ERROR = 0xFF, @@ -60,6 +63,14 @@ enum { HMM_DMIRROR_PROT_ZERO = 0x10, HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20, HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30, + HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL = 0x40, + HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE = 0x50, +}; + +enum { + /* 0 is reserved to catch uninitialized type fields */ + HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1, + HMM_DMIRROR_MEMORY_DEVICE_COHERENT, }; #endif /* _LIB_TEST_HMM_UAPI_H */ diff --git a/lib/test_kasan.c b/lib/test_kasan.c index c233b1a4e984..58c1b01ccfe2 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -131,6 +131,7 @@ static void kmalloc_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); /* * An unaligned access past the requested kmalloc size. * Only generic KASAN can precisely detect these. 
@@ -159,6 +160,7 @@ static void kmalloc_oob_left(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); kfree(ptr); } @@ -171,6 +173,7 @@ static void kmalloc_node_oob_right(struct kunit *test) ptr = kmalloc_node(size, GFP_KERNEL, 0); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); kfree(ptr); } @@ -191,6 +194,7 @@ static void kmalloc_pagealloc_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0); kfree(ptr); @@ -271,6 +275,7 @@ static void kmalloc_large_oob_right(struct kunit *test) ptr = kmalloc(size, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); kfree(ptr); } @@ -410,6 +415,8 @@ static void kmalloc_oob_16(struct kunit *test) ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); + OPTIMIZER_HIDE_VAR(ptr1); + OPTIMIZER_HIDE_VAR(ptr2); KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); kfree(ptr1); kfree(ptr2); @@ -756,6 +763,8 @@ static void ksize_unpoisons_memory(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); real_size = ksize(ptr); + OPTIMIZER_HIDE_VAR(ptr); + /* This access shouldn't trigger a KASAN report. */ ptr[size] = 'x'; @@ -778,6 +787,7 @@ static void ksize_uaf(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); kfree(ptr); + OPTIMIZER_HIDE_VAR(ptr); KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); diff --git a/lib/test_printf.c b/lib/test_printf.c index 07309c45f327..4bd15a593fbd 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -30,6 +30,12 @@ #define PAD_SIZE 16 #define FILL_CHAR '$' +#define NOWARN(option, comment, block) \ + __diag_push(); \ + __diag_ignore_all(#option, comment); \ + block \ + __diag_pop(); + KSTM_MODULE_GLOBALS(); static char *test_buffer __initdata; @@ -78,12 +84,17 @@ do_test(int bufsize, const char *expect, int elen, return 1; } - if (memchr_inv(test_buffer + written + 1, FILL_CHAR, BUF_SIZE + PAD_SIZE - (written + 1))) { + if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n", bufsize, fmt); return 1; } + if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) { + pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt); + return 1; + } + if (memcmp(test_buffer, expect, written)) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) 
wrote '%s', expected '%.*s'\n", bufsize, fmt, test_buffer, written, expect); @@ -154,9 +165,11 @@ test_number(void) test("0x1234abcd ", "%#-12x", 0x1234abcd); test(" 0x1234abcd", "%#12x", 0x1234abcd); test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234); - test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1); - test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1); - test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627); + NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", { + test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1); + test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1); + test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627); + }) /* * POSIX/C99: »The result of converting zero with an explicit * precision of zero shall be no characters.« Hence the output diff --git a/lib/test_siphash.c b/lib/test_siphash.c index a6d854d933bf..a96788d0141d 100644 --- a/lib/test_siphash.c +++ b/lib/test_siphash.c @@ -1,8 +1,7 @@ -/* Test cases for siphash.c +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * - * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * This file is provided under a dual BSD/GPLv2 license. + * Test cases for siphash.c * * SipHash: a fast short-input PRF * https://131002.net/siphash/ diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index cf41fd6df42a..4f2f2d1bac56 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -74,12 +74,13 @@ test_report_one_done(void) static int random_size_align_alloc_test(void) { - unsigned long size, align, rnd; + unsigned long size, align; + unsigned int rnd; void *ptr; int i; for (i = 0; i < test_loop_count; i++) { - get_random_bytes(&rnd, sizeof(rnd)); + rnd = prandom_u32(); /* * Maximum 1024 pages, if PAGE_SIZE is 4096. @@ -150,7 +151,7 @@ static int random_size_alloc_test(void) int i; for (i = 0; i < test_loop_count; i++) { - get_random_bytes(&n, sizeof(i)); + n = prandom_u32(); n = (n % 100) + 1; p = vmalloc(n * PAGE_SIZE); @@ -294,14 +295,14 @@ pcpu_alloc_test(void) for (i = 0; i < 35000; i++) { unsigned int r; - get_random_bytes(&r, sizeof(i)); + r = prandom_u32(); size = (r % (PAGE_SIZE / 4)) + 1; /* * Maximum PAGE_SIZE */ - get_random_bytes(&r, sizeof(i)); - align = 1 << ((i % 11) + 1); + r = prandom_u32(); + align = 1 << ((r % 11) + 1); pcpu[i] = __alloc_percpu(size, align); if (!pcpu[i]) @@ -396,7 +397,7 @@ static void shuffle_array(int *arr, int n) int i, j; for (i = n - 1; i > 0; i--) { - get_random_bytes(&rnd, sizeof(rnd)); + rnd = prandom_u32(); /* Cut the range. */ j = rnd % i; diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c new file mode 100644 index 000000000000..88637038b30c --- /dev/null +++ b/lib/trace_readwrite.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Register read and write tracepoints + * + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/ftrace.h> +#include <linux/module.h> +#include <asm-generic/io.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/rwmmio.h> + +#ifdef CONFIG_TRACE_MMIO_ACCESS +void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) +{ + trace_rwmmio_write(caller_addr, val, width, addr); +} +EXPORT_SYMBOL_GPL(log_write_mmio); +EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write); + +void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) +{ + trace_rwmmio_post_write(caller_addr, val, width, addr); +} +EXPORT_SYMBOL_GPL(log_post_write_mmio); +EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write); + +void log_read_mmio(u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) +{ + trace_rwmmio_read(caller_addr, width, addr); +} +EXPORT_SYMBOL_GPL(log_read_mmio); +EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read); + +void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) +{ + trace_rwmmio_post_read(caller_addr, val, width, addr); +} +EXPORT_SYMBOL_GPL(log_post_read_mmio); +EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read); +#endif /* CONFIG_TRACE_MMIO_ACCESS */ diff --git a/lib/vsprintf.c b/lib/vsprintf.c index fb77f7bfd126..3c1853a9d1c0 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -769,8 +769,7 @@ static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); unsigned long flags; - if (!system_unbound_wq || - (!rng_is_initialized() && !rng_has_arch_random()) || + if (!system_unbound_wq || !rng_is_initialized() || !spin_trylock_irqsave(&filling, flags)) return -EAGAIN; diff --git a/lib/xarray.c b/lib/xarray.c index 54e646e8e6ee..ea9ce1f0b386 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node) * xas_destroy() - Free any resources allocated during the XArray operation. * @xas: XArray operation state. * - * This function is now internal-only. + * Most users will not need to call this function; it is called for you + * by xas_nomem(). */ -static void xas_destroy(struct xa_state *xas) +void xas_destroy(struct xa_state *xas) { struct xa_node *next, *node = xas->xa_alloc; |
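As the comment in lib/memneq.c notes, callers are expected to go through crypto_memneq() from crypto/algapi.h rather than __crypto_memneq() itself. A minimal, hypothetical caller is sketched below (tag_matches() is an illustrative name, not part of the patch); the point is that the helper accumulates the XOR of every byte pair instead of returning at the first mismatch, so the run time does not reveal how many leading bytes matched.

#include <crypto/algapi.h>
#include <linux/types.h>

/* Sketch only: constant-time check of a received authentication tag. */
static bool tag_matches(const u8 *expected, const u8 *received, size_t len)
{
	/* crypto_memneq() returns nonzero iff the buffers differ */
	return crypto_memneq(expected, received, len) == 0;
}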
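test_bitmap_arr64() above exercises the new bitmap_to_arr64()/bitmap_from_arr64() conversion helpers by round-tripping every prefix length of a reference bitmap. A reduced sketch of the same round trip follows, with hypothetical names, assuming BITS_TO_U64() from <linux/bitops.h> is used to size the intermediate array.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>

#define EXAMPLE_NBITS 100

/* Sketch only: copy a bitmap out to a u64 array and back again. */
static void arr64_roundtrip_example(void)
{
	DECLARE_BITMAP(src, EXAMPLE_NBITS);
	DECLARE_BITMAP(dst, EXAMPLE_NBITS);
	u64 buf[BITS_TO_U64(EXAMPLE_NBITS)];

	bitmap_zero(src, EXAMPLE_NBITS);
	bitmap_set(src, 3, 10);				/* set bits 3..12 */

	bitmap_to_arr64(buf, src, EXAMPLE_NBITS);	/* bitmap -> u64[] */
	bitmap_from_arr64(dst, buf, EXAMPLE_NBITS);	/* u64[] -> bitmap */

	/* all EXAMPLE_NBITS bits must survive the round trip */
	WARN_ON(!bitmap_equal(src, dst, EXAMPLE_NBITS));
}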
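upload_register_store() in the test_firmware changes doubles as a compact example of the firmware-upload API the test targets: supply a struct fw_upload_ops, call firmware_upload_register(), and keep the returned handle for firmware_upload_unregister() (as upload_release() does). Below is a hypothetical driver-side sketch of the same sequence; every mydev_* name is invented for illustration and the op bodies are trivial stand-ins, not a real flashing flow.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct mydev_priv {
	struct fw_upload *fwl;
};

static enum fw_upload_err mydev_fw_prepare(struct fw_upload *fwl,
					   const u8 *data, u32 size)
{
	return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err mydev_fw_write(struct fw_upload *fwl, const u8 *data,
					 u32 offset, u32 size, u32 *written)
{
	*written = size;	/* pretend the whole chunk was programmed */
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err mydev_fw_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;
}

static void mydev_fw_cancel(struct fw_upload *fwl) { }

/* runs only after a successful prepare, per the comment in the test above */
static void mydev_fw_cleanup(struct fw_upload *fwl) { }

static const struct fw_upload_ops mydev_fw_ops = {
	.prepare	= mydev_fw_prepare,
	.write		= mydev_fw_write,
	.poll_complete	= mydev_fw_poll_complete,
	.cancel		= mydev_fw_cancel,
	.cleanup	= mydev_fw_cleanup,
};

static int mydev_register_upload(struct device *dev, struct mydev_priv *priv)
{
	struct fw_upload *fwl;

	fwl = firmware_upload_register(THIS_MODULE, dev, "mydev-firmware",
				       &mydev_fw_ops, priv);
	if (IS_ERR(fwl))
		return PTR_ERR(fwl);

	priv->fwl = fwl;	/* needed later for firmware_upload_unregister() */
	return 0;
}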
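The four log_*_mmio() hooks in lib/trace_readwrite.c are meant to be called from an architecture's instrumented MMIO accessors, bracketing each hardware access with a "before" and a "post" tracepoint. The sketch below is a hypothetical driver-local wrapper shown only to illustrate that pairing (the prototypes repeat the definitions above); on an architecture that selects ARCH_HAVE_TRACE_MMIO_ACCESS the stock writel() is already instrumented, so real code would not add a second layer.

#include <linux/io.h>
#include <linux/kernel.h>	/* _THIS_IP_ */

/* Prototypes matching the definitions in lib/trace_readwrite.c. */
void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr);

/* Sketch only: a 32-bit MMIO write with the trace hooks around it. */
static inline void my_traced_writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_);	/* fires rwmmio_write */
	writel(value, addr);				/* the real MMIO access */
	log_post_write_mmio(value, 32, addr, _THIS_IP_);/* fires rwmmio_post_write */
}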