Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_debugfs.h  5
-rw-r--r--  lib/Kconfig  13
-rw-r--r--  lib/Kconfig.debug  76
-rw-r--r--  lib/Makefile  5
-rw-r--r--  lib/atomic64.c  32
-rw-r--r--  lib/bitmap.c  4
-rw-r--r--  lib/clz_ctz.c  4
-rw-r--r--  lib/cmdline.c  5
-rw-r--r--  lib/cpu_rmap.c  5
-rw-r--r--  lib/crc-ccitt.c  4
-rw-r--r--  lib/crc-itu-t.c  4
-rw-r--r--  lib/crc-t10dif.c  4
-rw-r--r--  lib/crc16.c  4
-rw-r--r--  lib/crc4.c  4
-rw-r--r--  lib/crc7.c  4
-rw-r--r--  lib/crypto/Makefile  4
-rw-r--r--  lib/crypto/arc4.c  74
-rw-r--r--  lib/debugobjects.c  321
-rw-r--r--  lib/decompress_unlz4.c  5
-rw-r--r--  lib/devres.c  3
-rw-r--r--  lib/digsig.c  2
-rw-r--r--  lib/dim/Makefile  7
-rw-r--r--  lib/dim/dim.c  83
-rw-r--r--  lib/dim/net_dim.c  190
-rw-r--r--  lib/dim/rdma_dim.c  108
-rw-r--r--  lib/dynamic_debug.c  12
-rw-r--r--  lib/fault-inject.c  73
-rw-r--r--  lib/fonts/fonts.c  103
-rw-r--r--  lib/genalloc.c  129
-rw-r--r--  lib/hexdump.c  6
-rw-r--r--  lib/idr.c  14
-rw-r--r--  lib/iomap_copy.c  14
-rw-r--r--  lib/ioremap.c  11
-rw-r--r--  lib/iov_iter.c  15
-rw-r--r--  lib/klist.c  3
-rw-r--r--  lib/kobject.c  4
-rw-r--r--  lib/list_sort.c  2
-rw-r--r--  lib/mpi/longlong.h  16
-rw-r--r--  lib/mpi/mpi-pow.c  6
-rw-r--r--  lib/notifier-error-inject.c  13
-rw-r--r--  lib/objagg.c  6
-rw-r--r--  lib/parser.c  4
-rw-r--r--  lib/percpu-refcount.c  13
-rw-r--r--  lib/raid6/Makefile  98
-rw-r--r--  lib/raid6/neon.c  5
-rw-r--r--  lib/raid6/s390vx.uc  2
-rw-r--r--  lib/rbtree.c  40
-rw-r--r--  lib/reed_solomon/Makefile  2
-rw-r--r--  lib/reed_solomon/decode_rs.c  115
-rw-r--r--  lib/reed_solomon/reed_solomon.c  12
-rw-r--r--  lib/reed_solomon/test_rslib.c  518
-rw-r--r--  lib/rhashtable.c  5
-rw-r--r--  lib/sbitmap.c  10
-rw-r--r--  lib/scatterlist.c  49
-rw-r--r--  lib/sg_pool.c  39
-rw-r--r--  lib/sg_split.c  4
-rw-r--r--  lib/smp_processor_id.c  2
-rw-r--r--  lib/string.c  11
-rw-r--r--  lib/string_helpers.c  96
-rw-r--r--  lib/test_blackhole_dev.c  100
-rw-r--r--  lib/test_kasan.c  104
-rw-r--r--  lib/test_meminit.c  364
-rw-r--r--  lib/test_overflow.c  11
-rw-r--r--  lib/test_rhashtable.c  5
-rw-r--r--  lib/test_stackinit.c  21
-rw-r--r--  lib/test_string.c  83
-rw-r--r--  lib/test_xarray.c  38
-rw-r--r--  lib/ubsan.c  6
-rw-r--r--  lib/vdso/Kconfig  36
-rw-r--r--  lib/vdso/Makefile  22
-rw-r--r--  lib/vdso/gettimeofday.c  239
-rw-r--r--  lib/vsprintf.c  4
-rw-r--r--  lib/xarray.c  12
73 files changed, 2860 insertions, 607 deletions
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
return -ENODEV;
sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
- if (IS_ERR(sw842_debugfs_root))
- return PTR_ERR(sw842_debugfs_root);
for (i = 0; i < ARRAY_SIZE(template_count); i++) {
char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
static void __exit sw842_debugfs_remove(void)
{
- if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
- debugfs_remove_recursive(sw842_debugfs_root);
+ debugfs_remove_recursive(sw842_debugfs_root);
}
#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index e09b3e081a53..f33d66fc0e86 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -554,6 +554,14 @@ config SIGNATURE
Digital signature verification. Currently only RSA is supported.
Implementation is done using GnuPG MPI library
+config DIMLIB
+ bool "DIM library"
+ default y
+ help
+ Dynamic Interrupt Moderation library.
+ Implements an algorithm for dynamically changing CQ moderation
+ values according to run-time performance.
+
#
# libfdt files, only selected if needed.
#
@@ -568,6 +576,11 @@ config OID_REGISTRY
config UCS2_STRING
tristate
+#
+# generic vdso
+#
+source "lib/vdso/Kconfig"
+
source "lib/fonts/Kconfig"
config SG_SPLIT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cbdfae379896..bc6673ab3a08 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,19 +305,26 @@ config DEBUG_FS
If unsure, say N.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
depends on !UML
help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
+
+config HEADERS_CHECK
+ bool "Run sanity checks on uapi headers when building 'all'"
+ depends on HEADERS_INSTALL
+ help
+ This option will run basic sanity checks on uapi headers when
+ building the 'all' target, for example, ensure that they do not
+ attempt to include files which were not exported, etc.
If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
+ relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
bool "Allow compiler to uninline functions marked 'inline'"
@@ -1095,7 +1102,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
- select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+ select DEBUG_RWSEMS
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
@@ -1132,7 +1139,7 @@ config PROVE_LOCKING
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
- For more details, see Documentation/locking/lockdep-design.txt.
+ For more details, see Documentation/locking/lockdep-design.rst.
config LOCK_STAT
bool "Lock usage statistics"
@@ -1146,7 +1153,7 @@ config LOCK_STAT
help
This feature enables tracking lock contention points
- For more details, see Documentation/locking/lockstat.txt
+ For more details, see Documentation/locking/lockstat.rst
This also enables lock events required by "perf lock",
subcommand of perf.
@@ -1199,10 +1206,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
- depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+ depends on DEBUG_KERNEL
help
- This debugging feature allows mismatched rw semaphore locks and unlocks
- to be detected and reported.
+ This debugging feature allows mismatched rw semaphore locks
+ and unlocks to be detected and reported.
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1701,7 +1708,7 @@ config LKDTM
called lkdtm.
Documentation on how to use the module can be found in
- Documentation/fault-injection/provoke-crashes.txt
+ Documentation/fault-injection/provoke-crashes.rst
config TEST_LIST_SORT
tristate "Linked list sorting test"
@@ -1754,6 +1761,18 @@ config RBTREE_TEST
A benchmark measuring the performance of the rbtree library.
Also includes rbtree invariant checks.
+config REED_SOLOMON_TEST
+ tristate "Reed-Solomon library test"
+ depends on DEBUG_KERNEL || m
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC16
+ select REED_SOLOMON_DEC16
+ help
+ This option enables the self-test function of rslib at boot,
+ or at module load time.
+
+ If unsure, say N.
+
config INTERVAL_TREE_TEST
tristate "Interval tree test"
depends on DEBUG_KERNEL
@@ -1858,6 +1877,14 @@ config TEST_PARMAN
If unsure, say N.
+config TEST_IRQ_TIMINGS
+ bool "IRQ timings selftest"
+ depends on IRQ_TIMINGS
+ help
+ Enable this option to test the irq timings code on boot.
+
+ If unsure, say N.
+
config TEST_LKM
tristate "Test module loading with 'hello world' module"
depends on m
@@ -1909,6 +1936,15 @@ config TEST_BPF
If unsure, say N.
+config TEST_BLACKHOLE_DEV
+ tristate "Test blackhole netdev functionality"
+ depends on m && NET
+ help
+ This builds the "test_blackhole_dev" module that validates the
+ data path through this blackhole netdev.
+
+ If unsure, say N.
+
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
help
@@ -2040,6 +2076,14 @@ config TEST_STACKINIT
If unsure, say N.
+config TEST_MEMINIT
+ tristate "Test heap/page initialization"
+ help
+ Test if the kernel is zero-initializing heap and page allocations.
+ This can be useful to test init_on_alloc and init_on_free features.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index cb66bc9c5b2f..095601ce371d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -91,6 +91,8 @@ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
+obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
+obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@ -102,7 +104,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/
+obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
@@ -202,6 +204,7 @@ obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_MPILIB) += mpi/
+obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 7e6905751522..e98c85a99787 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,11 +42,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
-long long atomic64_read(const atomic64_t *v)
+s64 atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -55,7 +55,7 @@ long long atomic64_read(const atomic64_t *v)
}
EXPORT_SYMBOL(atomic64_read);
-void atomic64_set(atomic64_t *v, long long i)
+void atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
@@ -67,7 +67,7 @@ void atomic64_set(atomic64_t *v, long long i)
EXPORT_SYMBOL(atomic64_set);
#define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(long long a, atomic64_t *v) \
+void atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
@@ -79,11 +79,11 @@ void atomic64_##op(long long a, atomic64_t *v) \
EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
-long long atomic64_##op##_return(long long a, atomic64_t *v) \
+s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
- long long val; \
+ s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = (v->counter c_op a); \
@@ -93,11 +93,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
-long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
- long long val; \
+ s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = v->counter; \
@@ -130,11 +130,11 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
@@ -145,11 +145,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -160,11 +160,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
}
EXPORT_SYMBOL(atomic64_cmpxchg);
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
@@ -174,11 +174,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
}
EXPORT_SYMBOL(atomic64_xchg);
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
- long long val;
+ s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
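For context on what these s64 conversions flow through: the generic atomic64 fallback guards each variable with one of a small fixed array of spinlocks, chosen by hashing the variable's address, so unrelated atomic64_t variables rarely contend and no per-variable lock storage is needed. A condensed sketch of that scheme (the kernel additionally pads each table entry to a cacheline; NR_LOCKS is 16 in lib/atomic64.c at the time of this diff):

    #define NR_LOCKS 16

    static struct {
            raw_spinlock_t lock;
    } atomic64_lock[NR_LOCKS];

    static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
    {
            unsigned long addr = (unsigned long) v;

            addr >>= L1_CACHE_SHIFT;                /* drop cache-line offset bits */
            addr ^= (addr >> 8) ^ (addr >> 16);     /* mix in higher-order bits */
            return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
    }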
diff --git a/lib/bitmap.c b/lib/bitmap.c
index f235434df87b..bbe2589e8497 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/export.h>
#include <linux/thread_info.h>
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 2e11e48446ab..0d3a686b5ba2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/clz_ctz.c
*
* Copyright (C) 2013 Chanho Min <chanho.min@lge.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
* The functions in this file aren't called directly, but are required by
* GCC builtins such as __builtin_ctz, and therefore they can't be removed
* despite appearing unreferenced in kernel source.
diff --git a/lib/cmdline.c b/lib/cmdline.c
index dc59d6216318..fbb9981a04a4 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/cmdline.c
* Helper functions generally used for parsing kernel command line
@@ -5,11 +6,7 @@
*
* Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
*
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- *
* GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
- *
*/
#include <linux/export.h>
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index f610b2a10b3e..075f3788bbe4 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/cpu_rmap.h>
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d873b34039ff..d1a7d29d2ac9 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/crc-ccitt.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
index b3219d0abfb4..1974b355c148 100644
--- a/lib/crc-itu-t.c
+++ b/lib/crc-itu-t.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* crc-itu-t.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index e89ebfdbb0fc..8cc01a603416 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* T10 Data Integrity Field CRC16 calculation
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
diff --git a/lib/crc16.c b/lib/crc16.c
index 8737b084d1f9..5c3a803c01e0 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* crc16.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
diff --git a/lib/crc4.c b/lib/crc4.c
index 164ed9444cd3..e7e1779c67d9 100644
--- a/lib/crc4.c
+++ b/lib/crc4.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* crc4.c - simple crc-4 calculations.
- *
- * This source code is licensed under the GNU General Public License, Version
- * 2. See the file COPYING for more details.
*/
#include <linux/crc4.h>
diff --git a/lib/crc7.c b/lib/crc7.c
index bf6255e23919..6a848d73e804 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* crc7.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/types.h>
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000000..88195c34932d
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000000..c2020f19c652
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <crypto/arc4.h>
+#include <linux/module.h>
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+ int i, j = 0, k = 0;
+
+ ctx->x = 1;
+ ctx->y = 0;
+
+ for (i = 0; i < 256; i++)
+ ctx->S[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ u32 a = ctx->S[i];
+
+ j = (j + in_key[k] + a) & 0xff;
+ ctx->S[i] = ctx->S[j];
+ ctx->S[j] = a;
+ if (++k >= key_len)
+ k = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(arc4_setkey);
+
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+ u32 *const S = ctx->S;
+ u32 x, y, a, b;
+ u32 ty, ta, tb;
+
+ if (len == 0)
+ return;
+
+ x = ctx->x;
+ y = ctx->y;
+
+ a = S[x];
+ y = (y + a) & 0xff;
+ b = S[y];
+
+ do {
+ S[y] = a;
+ a = (a + b) & 0xff;
+ S[x] = b;
+ x = (x + 1) & 0xff;
+ ta = S[x];
+ ty = (y + ta) & 0xff;
+ tb = S[ty];
+ *out++ = *in++ ^ S[a];
+ if (--len == 0)
+ break;
+ y = ty;
+ a = ta;
+ b = tb;
+ } while (true);
+
+ ctx->x = x;
+ ctx->y = y;
+}
+EXPORT_SYMBOL(arc4_crypt);
+
+MODULE_LICENSE("GPL");
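Since this file introduces a standalone library interface (distinct from the crypto_skcipher one), a minimal, purely illustrative call site may help. arc4_crypt() is safe in place because each input byte is read before the corresponding output byte is written, and as a stream cipher, encryption and decryption are the same XOR operation:

    #include <crypto/arc4.h>

    static void arc4_demo(void)
    {
            struct arc4_ctx ctx;
            const u8 key[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
            u8 buf[32] = "attack at dawn";

            arc4_setkey(&ctx, key, sizeof(key));
            arc4_crypt(&ctx, buf, buf, sizeof(buf));   /* encrypt in place */
            arc4_setkey(&ctx, key, sizeof(key));       /* restart keystream */
            arc4_crypt(&ctx, buf, buf, sizeof(buf));   /* ...and decrypt */
    }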
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..61261195f5b6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,16 +25,37 @@
#define ODEBUG_POOL_SIZE 1024
#define ODEBUG_POOL_MIN_LEVEL 256
+#define ODEBUG_POOL_PERCPU_SIZE 64
+#define ODEBUG_BATCH_SIZE 16
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX 1024
+#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
+
struct debug_bucket {
struct hlist_head list;
raw_spinlock_t lock;
};
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+ struct hlist_head free_objs;
+ int obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
@@ -44,13 +65,20 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
+static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int __maybe_unused debug_objects_maxchecked __read_mostly;
@@ -63,6 +91,7 @@ static int debug_objects_pool_size __read_mostly
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache *obj_cache __read_mostly;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
@@ -71,7 +100,7 @@ static int debug_objects_allocated;
static int debug_objects_freed;
static void free_obj_work(struct work_struct *work);
-static DECLARE_WORK(debug_obj_work, free_obj_work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
static int __init enable_object_debug(char *str)
{
@@ -100,7 +129,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
static void fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
- struct debug_obj *new, *obj;
+ struct debug_obj *obj;
unsigned long flags;
if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -116,7 +145,7 @@ static void fill_pool(void)
* Recheck with the lock held as the worker thread might have
* won the race and freed the global free list already.
*/
- if (obj_nr_tofree) {
+ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
obj_nr_tofree--;
@@ -130,15 +159,23 @@ static void fill_pool(void)
return;
while (obj_pool_free < debug_objects_pool_min_level) {
+ struct debug_obj *new[ODEBUG_BATCH_SIZE];
+ int cnt;
- new = kmem_cache_zalloc(obj_cache, gfp);
- if (!new)
+ for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+ new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+ if (!new[cnt])
+ break;
+ }
+ if (!cnt)
return;
raw_spin_lock_irqsave(&pool_lock, flags);
- hlist_add_head(&new->node, &obj_pool);
- debug_objects_allocated++;
- obj_pool_free++;
+ while (cnt) {
+ hlist_add_head(&new[--cnt]->node, &obj_pool);
+ debug_objects_allocated++;
+ obj_pool_free++;
+ }
raw_spin_unlock_irqrestore(&pool_lock, flags);
}
}
@@ -163,36 +200,81 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
+ * Allocate a new object from the hlist
+ */
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+ struct debug_obj *obj = NULL;
+
+ if (list->first) {
+ obj = hlist_entry(list->first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ }
+
+ return obj;
+}
+
+/*
* Allocate a new object. If the pool is empty, switch off the debugger.
* Must be called with interrupts disabled.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
- struct debug_obj *obj = NULL;
+ struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ struct debug_obj *obj;
- raw_spin_lock(&pool_lock);
- if (obj_pool.first) {
- obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+ if (likely(obj_cache)) {
+ obj = __alloc_object(&percpu_pool->free_objs);
+ if (obj) {
+ percpu_pool->obj_free--;
+ goto init_obj;
+ }
+ }
- obj->object = addr;
- obj->descr = descr;
- obj->state = ODEBUG_STATE_NONE;
- obj->astate = 0;
- hlist_del(&obj->node);
+ raw_spin_lock(&pool_lock);
+ obj = __alloc_object(&obj_pool);
+ if (obj) {
+ obj_pool_used++;
+ obj_pool_free--;
- hlist_add_head(&obj->node, &b->list);
+ /*
+ * Looking ahead, allocate one batch of debug objects and
+ * put them into the percpu free pool.
+ */
+ if (likely(obj_cache)) {
+ int i;
+
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ struct debug_obj *obj2;
+
+ obj2 = __alloc_object(&obj_pool);
+ if (!obj2)
+ break;
+ hlist_add_head(&obj2->node,
+ &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ obj_pool_used++;
+ obj_pool_free--;
+ }
+ }
- obj_pool_used++;
if (obj_pool_used > obj_pool_max_used)
obj_pool_max_used = obj_pool_used;
- obj_pool_free--;
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
raw_spin_unlock(&pool_lock);
+init_obj:
+ if (obj) {
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ obj->astate = 0;
+ hlist_add_head(&obj->node, &b->list);
+ }
return obj;
}
@@ -209,13 +291,19 @@ static void free_obj_work(struct work_struct *work)
unsigned long flags;
HLIST_HEAD(tofree);
+ WRITE_ONCE(obj_freeing, false);
if (!raw_spin_trylock_irqsave(&pool_lock, flags))
return;
+ if (obj_pool_free >= debug_objects_pool_size)
+ goto free_objs;
+
/*
* The objs on the pool list might be allocated before the work is
* run, so recheck if the pool list is full or not; if not, fill the pool
- * list from the global free list
+ * list from the global free list. As it is likely that a workload
+ * may be gearing up to use more and more objects, don't free any
+ * of them until the next round.
*/
while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
@@ -224,7 +312,10 @@ static void free_obj_work(struct work_struct *work)
obj_pool_free++;
obj_nr_tofree--;
}
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ return;
+free_objs:
/*
* Pool list is already full and there are still objs on the free
* list. Move remaining free objs to a temporary list to free the
@@ -243,24 +334,86 @@ static void free_obj_work(struct work_struct *work)
}
}
-static bool __free_object(struct debug_obj *obj)
+static void __free_object(struct debug_obj *obj)
{
+ struct debug_obj *objs[ODEBUG_BATCH_SIZE];
+ struct debug_percpu_free *percpu_pool;
+ int lookahead_count = 0;
unsigned long flags;
bool work;
- raw_spin_lock_irqsave(&pool_lock, flags);
- work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
+ local_irq_save(flags);
+ if (!obj_cache)
+ goto free_to_obj_pool;
+
+ /*
+ * Try to free it into the percpu pool first.
+ */
+ percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+ hlist_add_head(&obj->node, &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ local_irq_restore(flags);
+ return;
+ }
+
+ /*
+ * As the percpu pool is full, look ahead and pull out a batch
+ * of objects from the percpu pool and free them as well.
+ */
+ for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
+ objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
+ if (!objs[lookahead_count])
+ break;
+ percpu_pool->obj_free--;
+ }
+
+free_to_obj_pool:
+ raw_spin_lock(&pool_lock);
+ work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
obj_pool_used--;
if (work) {
obj_nr_tofree++;
hlist_add_head(&obj->node, &obj_to_free);
+ if (lookahead_count) {
+ obj_nr_tofree += lookahead_count;
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_to_free);
+ }
+ }
+
+ if ((obj_pool_free > debug_objects_pool_size) &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+ int i;
+
+ /*
+ * Free one more batch of objects from obj_pool.
+ */
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ obj = __alloc_object(&obj_pool);
+ hlist_add_head(&obj->node, &obj_to_free);
+ obj_pool_free--;
+ obj_nr_tofree++;
+ }
+ }
} else {
obj_pool_free++;
hlist_add_head(&obj->node, &obj_pool);
+ if (lookahead_count) {
+ obj_pool_free += lookahead_count;
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_pool);
+ }
+ }
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
- return work;
+ raw_spin_unlock(&pool_lock);
+ local_irq_restore(flags);
}
/*
@@ -269,8 +422,11 @@ static bool __free_object(struct debug_obj *obj)
*/
static void free_object(struct debug_obj *obj)
{
- if (__free_object(obj))
- schedule_work(&debug_obj_work);
+ __free_object(obj);
+ if (!obj_freeing && obj_nr_tofree) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
}
/*
@@ -372,6 +528,7 @@ static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
enum debug_obj_state state;
+ bool check_stack = false;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
@@ -391,7 +548,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
debug_objects_oom();
return;
}
- debug_object_is_on_stack(addr, onstack);
+ check_stack = true;
}
switch (obj->state) {
@@ -402,20 +559,23 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "init");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "init");
debug_object_fixup(descr->fixup_init, addr, state);
return;
case ODEBUG_STATE_DESTROYED:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_print_object(obj, "init");
- break;
+ return;
default:
break;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (check_stack)
+ debug_object_is_on_stack(addr, onstack);
}
/**
@@ -473,6 +633,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
+ bool print_object = false;
+
switch (obj->state) {
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
@@ -481,14 +643,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "activate");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "activate");
ret = debug_object_fixup(descr->fixup_activate, addr, state);
return ret ? 0 : -EINVAL;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "activate");
+ print_object = true;
ret = -EINVAL;
break;
default:
@@ -496,10 +658,13 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
break;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "activate");
return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+
/*
* We are here when a static object is activated. We
* let the type specific code confirm whether this is
@@ -531,6 +696,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -548,24 +714,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
if (!obj->astate)
obj->state = ODEBUG_STATE_INACTIVE;
else
- debug_print_object(obj, "deactivate");
+ print_object = true;
break;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "deactivate");
+ print_object = true;
break;
default:
break;
}
- } else {
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
debug_print_object(&o, "deactivate");
+ } else if (print_object) {
+ debug_print_object(obj, "deactivate");
}
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -580,6 +749,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -599,20 +769,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
obj->state = ODEBUG_STATE_DESTROYED;
break;
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "destroy");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "destroy");
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
case ODEBUG_STATE_DESTROYED:
- debug_print_object(obj, "destroy");
+ print_object = true;
break;
default:
break;
}
out_unlock:
raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -641,9 +813,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
@@ -716,6 +888,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -731,22 +904,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
if (obj->astate == expect)
obj->astate = next;
else
- debug_print_object(obj, "active_state");
+ print_object = true;
break;
default:
- debug_print_object(obj, "active_state");
+ print_object = true;
break;
}
- } else {
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
debug_print_object(&o, "active_state");
+ } else if (print_object) {
+ debug_print_object(obj, "active_state");
}
-
- raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -760,7 +936,6 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
struct hlist_node *tmp;
struct debug_obj *obj;
int cnt, objs_checked = 0;
- bool work = false;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -782,16 +957,16 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
default:
hlist_del(&obj->node);
- work |= __free_object(obj);
+ __free_object(obj);
break;
}
}
@@ -807,8 +982,10 @@ repeat:
debug_objects_maxchecked = objs_checked;
/* Schedule work to actually kmem_cache_free() objects */
- if (work)
- schedule_work(&debug_obj_work);
+ if (!obj_freeing && obj_nr_tofree) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
}
void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -822,13 +999,19 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
static int debug_stats_show(struct seq_file *m, void *v)
{
+ int cpu, obj_percpu_free = 0;
+
+ for_each_possible_cpu(cpu)
+ obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
seq_printf(m, "warnings :%d\n", debug_objects_warnings);
seq_printf(m, "fixups :%d\n", debug_objects_fixups);
- seq_printf(m, "pool_free :%d\n", obj_pool_free);
+ seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
+ seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
- seq_printf(m, "pool_used :%d\n", obj_pool_used);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
@@ -850,26 +1033,16 @@ static const struct file_operations debug_stats_fops = {
static int __init debug_objects_init_debugfs(void)
{
- struct dentry *dbgdir, *dbgstats;
+ struct dentry *dbgdir;
if (!debug_objects_enabled)
return 0;
dbgdir = debugfs_create_dir("debug_objects", NULL);
- if (!dbgdir)
- return -ENOMEM;
- dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
- &debug_stats_fops);
- if (!dbgstats)
- goto err;
+ debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
return 0;
-
-err:
- debugfs_remove(dbgdir);
-
- return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
@@ -1175,9 +1348,20 @@ free:
*/
void __init debug_objects_mem_init(void)
{
+ int cpu, extras;
+
if (!debug_objects_enabled)
return;
+ /*
+ * Initialize the percpu object pools
+ *
+ * Initialization is not strictly necessary, but was done for
+ * completeness.
+ */
+ for_each_possible_cpu(cpu)
+ INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1194,6 +1378,7 @@ void __init debug_objects_mem_init(void)
* Increase the thresholds for allocating and freeing objects
* according to the number of possible CPUs available in the system.
*/
- debug_objects_pool_size += num_possible_cpus() * 32;
- debug_objects_pool_min_level += num_possible_cpus() * 4;
+ extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
+ debug_objects_pool_size += extras;
+ debug_objects_pool_min_level += extras;
}
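Taken together, the constants in this patch bound the new behaviour tightly: free_obj_work() is scheduled with ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(HZ, 10), i.e. at most roughly 10 runs per second, and each run frees at most ODEBUG_FREE_WORK_MAX = 1024 objects, which yields the ~10k objects/s ceiling the top comment describes. Likewise, the per-CPU sizing at the end replaces the old ad-hoc "* 32" / "* 4" scaling with a single ODEBUG_BATCH_SIZE-based figure: on a 64-CPU machine, extras = 64 * 16 = 1024, so debug_objects_pool_size grows from 1024 to 2048 and debug_objects_pool_min_level from 256 to 1280.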
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 1b0baf3008ea..c0cfcfd486be 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifdef STATIC
diff --git a/lib/devres.c b/lib/devres.c
index 69bed2f38306..6a0e9bd6524a 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
* if (IS_ERR(base))
* return PTR_ERR(base);
*/
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
{
resource_size_t size;
void __iomem *dest_ptr;
diff --git a/lib/digsig.c b/lib/digsig.c
index 3cf89c775ab2..e0627c3e53b2 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -218,7 +218,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
/* search in specific keyring */
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1UL),
- &key_type_user, name);
+ &key_type_user, name, true);
if (IS_ERR(kref))
key = ERR_CAST(kref);
else
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
new file mode 100644
index 000000000000..1d6858a108cb
--- /dev/null
+++ b/lib/dim/Makefile
@@ -0,0 +1,7 @@
+#
+# DIM Dynamic Interrupt Moderation library
+#
+
+obj-$(CONFIG_DIMLIB) += dim.o
+
+dim-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
new file mode 100644
index 000000000000..439d641ec796
--- /dev/null
+++ b/lib/dim/dim.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+bool dim_on_top(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ return true;
+ case DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+EXPORT_SYMBOL(dim_on_top);
+
+void dim_turn(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ dim->tune_state = DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case DIM_GOING_LEFT:
+ dim->tune_state = DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(dim_turn);
+
+void dim_park_on_top(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = DIM_PARKING_ON_TOP;
+}
+EXPORT_SYMBOL(dim_park_on_top);
+
+void dim_park_tired(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = DIM_PARKING_TIRED;
+}
+EXPORT_SYMBOL(dim_park_tired);
+
+void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+ u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
+ start->comp_ctr);
+
+ if (!delta_us)
+ return;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+ curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
+ if (curr_stats->epms != 0)
+ curr_stats->cpe_ratio =
+ (curr_stats->cpms * 100) / curr_stats->epms;
+ else
+ curr_stats->cpe_ratio = 0;
+
+}
+EXPORT_SYMBOL(dim_calc_stats);
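To make the normalization in dim_calc_stats() concrete with round (purely illustrative) numbers: a sample window of delta_us = 2000 (2 ms) covering npkts = 3000 and nbytes = 3,000,000 yields ppms = DIV_ROUND_UP(3000 * 1000, 2000) = 1500 packets/ms and bpms = DIV_ROUND_UP(3000000 * 1000, 2000) = 1,500,000 bytes/ms, i.e. about 12 Gbit/s. cpe_ratio is then simply completions per 100 events, the signal rdma_dim below keys on. The "71 minutes" comment refers to delta_us fitting in a u32: 2^32 microseconds is about 71.6 minutes.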
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000000..5bcc902c5388
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+struct dim_cq_moder
+net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_rx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_tx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_tx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+
+static int net_dim_step(struct dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return DIM_STEPPED;
+}
+
+static void net_dim_exit_parking(struct dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+static int net_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? DIM_STATS_BETTER :
+ DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_GOING_RIGHT:
+ case DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_BETTER)
+ dim_turn(dim);
+
+ if (dim_on_top(dim)) {
+ dim_park_on_top(dim);
+ break;
+ }
+
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case DIM_ON_EDGE:
+ dim_park_on_top(dim);
+ break;
+ case DIM_TOO_TIRED:
+ dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if (prev_state != DIM_PARKING_ON_TOP ||
+ dim->tune_state != DIM_PARKING_ON_TOP)
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+void net_dim(struct dim *dim, struct dim_sample end_sample)
+{
+ struct dim_stats curr_stats;
+ u16 nevents;
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ end_sample.event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < DIM_NEVENTS)
+ break;
+ dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* fall through */
+ case DIM_START_MEASURE:
+ dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
+ end_sample.byte_ctr, &dim->start_sample);
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(net_dim);
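The calling convention this state machine implies: a driver snapshots its counters once per interrupt/poll cycle, hands the sample to net_dim(), and applies the new dim->profile_ix from the scheduled work item (typically via net_dim_get_rx_moderation()) before resetting dim->state to DIM_START_MEASURE. A hedged sketch, with all example_* names invented for illustration:

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            struct example_ring *ring = container_of(napi, struct example_ring,
                                                     napi);
            int done = example_clean_rx(ring, budget);
            struct dim_sample sample;

            /* Fold the ring's running counters into one sample point. */
            dim_update_sample(ring->event_ctr, ring->rx_packets,
                              ring->rx_bytes, &sample);
            net_dim(&ring->dim, sample);    /* may schedule ring->dim.work */

            return done;
    }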
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
new file mode 100644
index 000000000000..f7e26c7b4749
--- /dev/null
+++ b/lib/dim/rdma_dim.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+static int rdma_dim_step(struct dim *dim)
+{
+ if (dim->tune_state == DIM_GOING_RIGHT) {
+ if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ }
+ if (dim->tune_state == DIM_GOING_LEFT) {
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ }
+
+ return DIM_STEPPED;
+}
+
+static int rdma_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ /* first stat */
+ if (!prev->cpms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
+ return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
+ return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_ix = dim->profile_ix;
+ u8 state = dim->tune_state;
+ int stats_res;
+ int step_res;
+
+ if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
+ stats_res = rdma_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+
+ switch (stats_res) {
+ case DIM_STATS_SAME:
+ if (curr_stats->cpe_ratio <= 50 * prev_ix)
+ dim->profile_ix = 0;
+ break;
+ case DIM_STATS_WORSE:
+ dim_turn(dim);
+ /* fall through */
+ case DIM_STATS_BETTER:
+ step_res = rdma_dim_step(dim);
+ if (step_res == DIM_ON_EDGE)
+ dim_turn(dim);
+ break;
+ }
+ }
+
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
+void rdma_dim(struct dim *dim, u64 completions)
+{
+ struct dim_sample *curr_sample = &dim->measuring_sample;
+ struct dim_stats curr_stats;
+ u32 nevents;
+
+ dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
+ curr_sample->comp_ctr + completions,
+ &dim->measuring_sample);
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ if (nevents < DIM_NEVENTS)
+ break;
+ dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
+ if (rdma_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ /* fall through */
+ case DIM_START_MEASURE:
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
+ curr_sample->comp_ctr,
+ &dim->start_sample);
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(rdma_dim);
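Unlike net_dim(), rdma_dim() maintains its own measuring_sample internally, so the expected call site is thinner still; something like the following from a CQ polling loop (the cq->dim field name is illustrative):

    /* After harvesting a batch of n completions from the CQ: */
    if (cq->dim)
            rdma_dim(cq->dim, n);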
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..c60409138e13 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -993,20 +993,14 @@ static __initdata int ddebug_init_success;
static int __init dynamic_debug_init_debugfs(void)
{
- struct dentry *dir, *file;
+ struct dentry *dir;
if (!ddebug_init_success)
return -ENODEV;
dir = debugfs_create_dir("dynamic_debug", NULL);
- if (!dir)
- return -ENOMEM;
- file = debugfs_create_file("control", 0644, dir, NULL,
- &ddebug_proc_fops);
- if (!file) {
- debugfs_remove(dir);
- return -ENOMEM;
- }
+ debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
+
return 0;
}
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..8186ca84910b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -166,10 +166,10 @@ static int debugfs_ul_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
-static struct dentry *debugfs_create_ul(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_ul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value, &fops_ul);
+ debugfs_create_file(name, mode, parent, value, &fops_ul);
}
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +185,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
debugfs_stacktrace_depth_set, "%llu\n");
-static struct dentry *debugfs_create_stacktrace_depth(
- const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value)
+static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
{
- return debugfs_create_file(name, mode, parent, value,
- &fops_stacktrace_depth);
+ debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
}
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,51 +201,31 @@ struct dentry *fault_create_debugfs_attr(const char *name,
struct dentry *dir;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
-
- if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
- goto fail;
- if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
- goto fail;
- if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
- goto fail;
- if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
- goto fail;
- if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
- &attr->ratelimit_state.interval))
- goto fail;
- if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
- &attr->ratelimit_state.burst))
- goto fail;
- if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
- goto fail;
+ if (IS_ERR(dir))
+ return dir;
+
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+ debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval);
+ debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst);
+ debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
- if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
- &attr->stacktrace_depth))
- goto fail;
- if (!debugfs_create_ul("require-start", mode, dir,
- &attr->require_start))
- goto fail;
- if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
- goto fail;
- if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
- goto fail;
- if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
- goto fail;
-
+ debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth);
+ debugfs_create_ul("require-start", mode, dir, &attr->require_start);
+ debugfs_create_ul("require-end", mode, dir, &attr->require_end);
+ debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
+ debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
attr->dname = dget(dir);
return dir;
-fail:
- debugfs_remove_recursive(dir);
-
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
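This hunk and the dynamic_debug one above converge on the same post-cleanup debugfs idiom: creation calls are fire-and-forget, since the debugfs helpers treat an ERR_PTR parent as a no-op and a debugfs failure is not a reason to fail the caller. Roughly, with illustrative names:

    static void example_debugfs_init(struct example_dev *dev)
    {
            struct dentry *dir = debugfs_create_dir("example", NULL);

            /* No NULL/IS_ERR checks: a failed dir simply disables the files. */
            debugfs_create_u32("level", 0644, dir, &dev->level);
            debugfs_create_bool("enabled", 0644, dir, &dev->enabled);
    }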
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..e7258d8c252b 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,42 @@
#endif
#include <linux/font.h>
-#define NO_FONTS
-
static const struct font_desc *fonts[] = {
#ifdef CONFIG_FONT_8x8
-#undef NO_FONTS
- &font_vga_8x8,
+ &font_vga_8x8,
#endif
#ifdef CONFIG_FONT_8x16
-#undef NO_FONTS
- &font_vga_8x16,
+ &font_vga_8x16,
#endif
#ifdef CONFIG_FONT_6x11
-#undef NO_FONTS
- &font_vga_6x11,
+ &font_vga_6x11,
#endif
#ifdef CONFIG_FONT_7x14
-#undef NO_FONTS
- &font_7x14,
+ &font_7x14,
#endif
#ifdef CONFIG_FONT_SUN8x16
-#undef NO_FONTS
- &font_sun_8x16,
+ &font_sun_8x16,
#endif
#ifdef CONFIG_FONT_SUN12x22
-#undef NO_FONTS
- &font_sun_12x22,
+ &font_sun_12x22,
#endif
#ifdef CONFIG_FONT_10x18
-#undef NO_FONTS
- &font_10x18,
+ &font_10x18,
#endif
#ifdef CONFIG_FONT_ACORN_8x8
-#undef NO_FONTS
- &font_acorn_8x8,
+ &font_acorn_8x8,
#endif
#ifdef CONFIG_FONT_PEARL_8x8
-#undef NO_FONTS
- &font_pearl_8x8,
+ &font_pearl_8x8,
#endif
#ifdef CONFIG_FONT_MINI_4x6
-#undef NO_FONTS
- &font_mini_4x6,
+ &font_mini_4x6,
#endif
#ifdef CONFIG_FONT_6x10
-#undef NO_FONTS
- &font_6x10,
+ &font_6x10,
#endif
#ifdef CONFIG_FONT_TER16x32
-#undef NO_FONTS
- &font_ter_16x32,
+ &font_ter_16x32,
#endif
};
@@ -90,16 +76,17 @@ static const struct font_desc *fonts[] = {
* specified font.
*
*/
-
const struct font_desc *find_font(const char *name)
{
- unsigned int i;
+ unsigned int i;
- for (i = 0; i < num_fonts; i++)
- if (!strcmp(fonts[i]->name, name))
- return fonts[i];
- return NULL;
+ BUILD_BUG_ON(!num_fonts);
+ for (i = 0; i < num_fonts; i++)
+ if (!strcmp(fonts[i]->name, name))
+ return fonts[i];
+ return NULL;
}
+EXPORT_SYMBOL(find_font);
/**
@@ -116,44 +103,46 @@ const struct font_desc *find_font(const char *name)
* chosen font.
*
*/
-
const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
u32 font_h)
{
- int i, c, cc;
- const struct font_desc *f, *g;
-
- g = NULL;
- cc = -10000;
- for(i=0; i<num_fonts; i++) {
- f = fonts[i];
- c = f->pref;
+ int i, c, cc, res;
+ const struct font_desc *f, *g;
+
+ g = NULL;
+ cc = -10000;
+ for (i = 0; i < num_fonts; i++) {
+ f = fonts[i];
+ c = f->pref;
#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
- if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
- c = 100;
+ if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
+ c = 100;
#endif
#ifdef CONFIG_FONT_6x11
- if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
- c = 100;
+ if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
+ c = 100;
#endif
#endif
- if ((yres < 400) == (f->height <= 8))
- c += 1000;
+ if ((yres < 400) == (f->height <= 8))
+ c += 1000;
+
+ /* prefer a bigger font for high resolution */
+ res = (xres / f->width) * (yres / f->height) / 1000;
+ if (res > 20)
+ c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
- c += 1000;
+ if ((font_w & (1 << (f->width - 1))) &&
+ (font_h & (1 << (f->height - 1))))
+ c += 1000;
- if (c > cc) {
- cc = c;
- g = f;
+ if (c > cc) {
+ cc = c;
+ g = f;
+ }
}
- }
- return g;
+ return g;
}
-
-EXPORT_SYMBOL(find_font);
EXPORT_SYMBOL(get_default_font);
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
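The new res term is easiest to see with numbers: on a 3840x2160 console, an 8x8 font gives res = (3840/8) * (2160/8) / 1000 = 129, costing c += 20 - 129 = -109, while the 16x32 Terminus font gives res = (240 * 67) / 1000 = 16 and no penalty, so high-resolution displays now prefer the larger, readable font. The existing +1000 bonuses still dominate at low resolutions.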
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 770c769d7cb7..9fc31292cfa1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Basic general purpose allocator for managing special purpose
* memory, for example, memory that is not managed by the regular
@@ -23,9 +24,6 @@
* CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
*
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/slab.h>
@@ -329,21 +327,45 @@ EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
- * @dma: dma-view physical address return value. Use NULL if unneeded.
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
*
* Allocate the requested number of bytes from the specified pool.
* Uses the pool allocation function (with first-fit algorithm by default).
* Can not be used in NMI handler on architectures without
* NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
*/
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
+ return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
+ * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
+ * usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool. Uses the
+ * given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
unsigned long vaddr;
if (!pool)
return NULL;
- vaddr = gen_pool_alloc(pool, size);
+ vaddr = gen_pool_alloc_algo(pool, size, algo, data);
if (!vaddr)
return NULL;
@@ -352,7 +374,102 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
return (void *)vaddr;
}
-EXPORT_SYMBOL(gen_pool_dma_alloc);
+EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
+
+/**
+ * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
+ * usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of bytes from the specified pool, with the given
+ * alignment restriction. Can not be used in NMI handler on architectures
+ * without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_alloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_align);
+
+/**
+ * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
+ * DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc);
+
+/**
+ * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
+ * DMA usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool. Uses
+ * the given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
+
+ if (vaddr)
+ memset(vaddr, 0, size);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
+
+/**
+ * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
+ * DMA usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool,
+ * with the given alignment restriction. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_zalloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
* gen_pool_free - free allocated special memory back to the pool
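A usage sketch for the new zeroing and alignment helpers ("pool" is assumed
to have been created with gen_pool_create() and populated via
gen_pool_add_virt() elsewhere):

    dma_addr_t dma;
    void *vaddr;

    /* zeroed, 256-byte-aligned allocation, DMA address returned too */
    vaddr = gen_pool_dma_zalloc_align(pool, 512, &dma, 256);
    if (!vaddr)
            return -ENOMEM;
    /* ... program "dma" into the device, touch "vaddr" from the CPU ... */
    gen_pool_free(pool, (unsigned long)vaddr, 512);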
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 81b70ed37209..b1d55b669ae2 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/hexdump.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <linux/types.h>
diff --git a/lib/idr.c b/lib/idr.c
index c34e256d2f01..66a374892482 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -228,11 +228,21 @@ void *idr_get_next(struct idr *idr, int *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ void *entry = NULL;
unsigned long base = idr->idr_base;
unsigned long id = *nextid;
id = (id < base) ? 0 : id - base;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
+ radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
+ entry = rcu_dereference_raw(*slot);
+ if (!entry)
+ continue;
+ if (!xa_is_internal(entry))
+ break;
+ if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
+ break;
+ slot = radix_tree_iter_retry(&iter);
+ }
if (!slot)
return NULL;
id = iter.index + base;
@@ -241,7 +251,7 @@ void *idr_get_next(struct idr *idr, int *nextid)
return NULL;
*nextid = id;
- return rcu_dereference_raw(*slot);
+ return entry;
}
EXPORT_SYMBOL(idr_get_next);
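The usual caller pattern that this keeps correct, sketched with placeholders
("my_idr", do_something()): idr_for_each_entry() is built on idr_get_next(),
so skipping internal retry entries in the underlying radix tree prevents the
walk from returning bogus pointers.

    int id;
    void *entry;

    idr_for_each_entry(&my_idr, entry, id)
            do_something(entry);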
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index b8f1d6cbb200..5de7c04e05ef 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/export.h>
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 063213685563..0a2ffadc6d71 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -30,6 +30,8 @@ early_param("nohugeiomap", set_nohugeiomap);
void __init ioremap_huge_init(void)
{
if (!ioremap_huge_disabled) {
+ if (arch_ioremap_p4d_supported())
+ ioremap_p4d_capable = 1;
if (arch_ioremap_pud_supported())
ioremap_pud_capable = 1;
if (arch_ioremap_pmd_supported())
@@ -86,6 +88,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
if ((end - addr) != PMD_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, PMD_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, PMD_SIZE))
return 0;
@@ -126,6 +131,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
if ((end - addr) != PUD_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, PUD_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, PUD_SIZE))
return 0;
@@ -166,6 +174,9 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
if ((end - addr) != P4D_SIZE)
return 0;
+ if (!IS_ALIGNED(addr, P4D_SIZE))
+ return 0;
+
if (!IS_ALIGNED(phys_addr, P4D_SIZE))
return 0;
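A condensed form of the strengthened precondition (a sketch mirroring the
three added checks, shown at the PMD level): a huge mapping is attempted
only when the range covers exactly one PMD and both the virtual and the
physical address sit on a PMD boundary, since alignment of one does not
imply alignment of the other.

    static bool can_use_huge_pmd(unsigned long addr, unsigned long end,
                                 phys_addr_t phys_addr)
    {
            return (end - addr) == PMD_SIZE &&
                   IS_ALIGNED(addr, PMD_SIZE) &&
                   IS_ALIGNED(phys_addr, PMD_SIZE);
    }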
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f99c41d4eb54..f1e0569b4539 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1634,9 +1634,9 @@ EXPORT_SYMBOL(dup_iter);
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
- * Return: 0 on success or negative error code on error.
+ * Return: Negative error code on error, bytes imported on success
*/
-int import_iovec(int type, const struct iovec __user * uvector,
+ssize_t import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
{
@@ -1652,16 +1652,17 @@ int import_iovec(int type, const struct iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
EXPORT_SYMBOL(import_iovec);
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
+ssize_t compat_import_iovec(int type,
+ const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i)
{
ssize_t n;
struct iovec *p;
@@ -1675,7 +1676,7 @@ int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
}
iov_iter_init(i, type, p, nr_segs, n);
*iov = p == *iov ? NULL : p;
- return 0;
+ return n;
}
#endif
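Caller-side sketch of the new contract ("uvec" and "nr_segs" stand in for
values from the syscall path): a negative result is an error, a non-negative
one is the total byte count of the iterator, which callers previously had to
fetch separately via iov_iter_count().

    struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
    struct iov_iter iter;
    ssize_t ret;

    ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
    if (ret < 0)
            return ret;
    /* ... do the I/O; ret == iov_iter_count(&iter) ... */
    kfree(iov);     /* NULL if the stack array was used; kfree(NULL) is ok */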
diff --git a/lib/klist.c b/lib/klist.c
index f6b547812fe3..332a4fbf18ff 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* klist.c - Routines for manipulating klists.
*
* Copyright (C) 2005 Patrick Mochel
*
- * This file is released under the GPL v2.
- *
* This klist interface provides a couple of structures that wrap around
* struct list_head to provide explicit list "head" (struct klist) and list
* "node" (struct klist_node) objects. For struct klist, a spinlock is
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..83198cb37d8d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -498,8 +498,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- if (!kobj->parent)
+ if (!kobj->parent) {
+ kobject_put(kobj);
return -EINVAL;
+ }
devpath = kobject_get_path(kobj, GFP_KERNEL);
if (!devpath) {
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 712ed1f4eb64..52f0c258c895 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -157,9 +157,11 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
*
* The number of pending lists of size 2^k is determined by the
* state of bit k of "count" plus two extra pieces of information:
+ *
* - The state of bit k-1 (when k == 0, consider bit -1 always set), and
* - Whether the higher-order bits are zero or non-zero (i.e.
* is count >= 2^(k+1)).
+ *
* There are six states we distinguish. "x" represents some arbitrary
* bits, and "y" represents some arbitrary non-zero bits:
* 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 08c60d10747f..3bb6260d8f42 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -397,8 +397,8 @@ do { \
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addl %5,%1\n" \
"adcl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "%0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"%1" ((USItype)(al)), \
@@ -406,22 +406,22 @@ do { \
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("subl %5,%1\n" \
"sbbl %3,%0" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
+ : "=r" (sh), \
+ "=&r" (sl) \
: "0" ((USItype)(ah)), \
"g" ((USItype)(bh)), \
"1" ((USItype)(al)), \
"g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mull %3" \
- : "=a" ((USItype)(w0)), \
- "=d" ((USItype)(w1)) \
+ : "=a" (w0), \
+ "=d" (w1) \
: "%0" ((USItype)(u)), \
"rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
__asm__ ("divl %4" \
- : "=a" ((USItype)(q)), \
- "=d" ((USItype)(r)) \
+ : "=a" (q), \
+ "=d" (r) \
: "0" ((USItype)(n0)), \
"1" ((USItype)(n1)), \
"rm" ((USItype)(d)))
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 82b19e4f1189..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -24,6 +24,7 @@
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
{
mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
+ struct karatsuba_ctx karactx = {};
mpi_ptr_t xp_marker = NULL;
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
@@ -150,13 +151,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
int c;
mpi_limb_t e;
mpi_limb_t carry_limb;
- struct karatsuba_ctx karactx;
xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
if (!xp)
goto enomem;
- memset(&karactx, 0, sizeof karactx);
negative_result = (ep[0] & 1) && base->sign;
i = esize - 1;
@@ -281,8 +280,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
if (mod_shift_cnt)
mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
MPN_NORMALIZE(rp, rsize);
-
- mpihelp_release_karatsuba_ctx(&karactx);
}
if (negative_result && rsize) {
@@ -299,6 +296,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
leave:
rc = 0;
enomem:
+ mpihelp_release_karatsuba_ctx(&karactx);
if (assign_rp)
mpi_assign_limb_space(res, rp, size);
if (mp_marker)
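The mpi_powm() change is the classic single-exit cleanup pattern; a generic
sketch ("ctx", step() and release() are illustrative names, not the MPI
API): zero-initialize the context up front so releasing it is safe on every
path, then release once at the common exit instead of only on the success
path.

    int f(void)
    {
            struct ctx c = {};  /* zero-init: release(&c) is always safe */
            int rc = -ENOMEM;

            if (step() < 0)
                    goto out;   /* early error: cleanup still runs */
            rc = 0;
    out:
            release(&c);        /* one cleanup point for all paths */
            return rc;
    }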
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..21016b32d313 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
err_inject->nb.priority = priority;
dir = debugfs_create_dir(name, parent);
- if (!dir)
- return ERR_PTR(-ENOMEM);
actions_dir = debugfs_create_dir("actions", dir);
- if (!actions_dir)
- goto fail;
for (action = err_inject->actions; action->name; action++) {
struct dentry *action_dir;
action_dir = debugfs_create_dir(action->name, actions_dir);
- if (!action_dir)
- goto fail;
/*
* Create debugfs r/w file containing action->error. If
* notifier call chain is called with action->val, it will
* fail with the error code
*/
- if (!debugfs_create_errno("error", mode, action_dir,
- &action->error))
- goto fail;
+ debugfs_create_errno("error", mode, action_dir, &action->error);
}
return dir;
-fail:
- debugfs_remove_recursive(dir);
- return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(notifier_err_inject_init);
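This follows the tree-wide debugfs convention; a sketch with placeholder
names ("mydev", "count"): creation helpers need no error checking, since
passing an errored dentry on to the next debugfs call is safe and the rest
of the kernel must keep working without debugfs anyway.

    dir = debugfs_create_dir("mydev", NULL);        /* no NULL check */
    debugfs_create_u32("count", 0644, dir, &count); /* no error check */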
diff --git a/lib/objagg.c b/lib/objagg.c
index 576be22e86de..55621fb82e0a 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -605,12 +605,10 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
{
struct objagg_stats *objagg_stats;
struct objagg_obj *objagg_obj;
- size_t alloc_size;
int i;
- alloc_size = sizeof(*objagg_stats) +
- sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
- objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg->obj_count), GFP_KERNEL);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
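What struct_size() buys here, sketched with illustrative types: an
overflow-checked size for a structure with a trailing flexible array,
replacing open-coded sizeof(*p) + n * sizeof(elem) arithmetic that can wrap
for a huge count.

    struct stats {
            unsigned int count;
            struct info  stats_info[];      /* flexible array member */
    };

    p = kzalloc(struct_size(p, stats_info, n), GFP_KERNEL);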
diff --git a/lib/parser.c b/lib/parser.c
index dd70e5e6c9e2..f5b3e5d7a7f9 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/parser.c - simple parser for mount, etc. options.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/ctype.h>
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..4f6c6ebbbbde 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
return -ENOMEM;
ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
- if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
- else
+ ref->allow_reinit = true;
+ } else {
start_count += PERCPU_COUNT_BIAS;
+ }
if (flags & PERCPU_REF_INIT_DEAD)
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
ref->confirm_switch = NULL;
wake_up_all(&percpu_ref_switch_waitq);
+ if (!ref->allow_reinit)
+ percpu_ref_exit(ref);
+
/* drop ref from percpu_ref_switch_to_atomic() */
percpu_ref_put(ref);
}
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
+ if (WARN_ON_ONCE(!ref->allow_reinit))
+ return;
+
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
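A sketch of the opt-in that the new flag introduces ("my_release" is an
assumed callback, not kernel API): without PERCPU_REF_ALLOW_REINIT the
percpu storage is now freed as soon as the ref has switched to atomic mode,
so refs that are killed and later revived must request it.

    ret = percpu_ref_init(&ref, my_release, PERCPU_REF_ALLOW_REINIT,
                          GFP_KERNEL);
    /* ... use the ref ... */
    percpu_ref_kill(&ref);
    /* legal only because ALLOW_REINIT was requested at init time: */
    percpu_ref_reinit(&ref);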
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index e723eacf7868..42695bc8d451 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -12,9 +12,6 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
-quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
-
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
@@ -26,7 +23,6 @@ CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
CFLAGS_REMOVE_altivec8.o += -msoft-float
-CFLAGS_REMOVE_altivec8.o += -msoft-float
CFLAGS_REMOVE_vpermxor1.o += -msoft-float
CFLAGS_REMOVE_vpermxor2.o += -msoft-float
CFLAGS_REMOVE_vpermxor4.o += -msoft-float
@@ -51,111 +47,39 @@ CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
endif
endif
-targets += int1.c
-$(obj)/int1.c: UNROLL := 1
-$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int2.c
-$(obj)/int2.c: UNROLL := 2
-$(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int4.c
-$(obj)/int4.c: UNROLL := 4
-$(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int8.c
-$(obj)/int8.c: UNROLL := 8
-$(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
-targets += int16.c
-$(obj)/int16.c: UNROLL := 16
-$(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
-targets += int32.c
-$(obj)/int32.c: UNROLL := 32
-$(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE
+targets += int1.c int2.c int4.c int8.c int16.c int32.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_altivec1.o += $(altivec_flags)
-targets += altivec1.c
-$(obj)/altivec1.c: UNROLL := 1
-$(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec2.o += $(altivec_flags)
-targets += altivec2.c
-$(obj)/altivec2.c: UNROLL := 2
-$(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec4.o += $(altivec_flags)
-targets += altivec4.c
-$(obj)/altivec4.c: UNROLL := 4
-$(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_altivec8.o += $(altivec_flags)
-targets += altivec8.c
-$(obj)/altivec8.c: UNROLL := 8
-$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
+targets += altivec1.c altivec2.c altivec4.c altivec8.c
+$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_vpermxor1.o += $(altivec_flags)
-targets += vpermxor1.c
-$(obj)/vpermxor1.c: UNROLL := 1
-$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor2.o += $(altivec_flags)
-targets += vpermxor2.c
-$(obj)/vpermxor2.c: UNROLL := 2
-$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor4.o += $(altivec_flags)
-targets += vpermxor4.c
-$(obj)/vpermxor4.c: UNROLL := 4
-$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor8.c
-$(obj)/vpermxor8.c: UNROLL := 8
-$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
+$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
CFLAGS_neon1.o += $(NEON_FLAGS)
-targets += neon1.c
-$(obj)/neon1.c: UNROLL := 1
-$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon2.o += $(NEON_FLAGS)
-targets += neon2.c
-$(obj)/neon2.c: UNROLL := 2
-$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon4.o += $(NEON_FLAGS)
-targets += neon4.c
-$(obj)/neon4.c: UNROLL := 4
-$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
- $(call if_changed,unroll)
-
CFLAGS_neon8.o += $(NEON_FLAGS)
-targets += neon8.c
-$(obj)/neon8.c: UNROLL := 8
-$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+targets += neon1.c neon2.c neon4.c neon8.c
+$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
targets += s390vx8.c
-$(obj)/s390vx8.c: UNROLL := 8
-$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index 7076ef1ba3dd..0a2e76035ea9 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
*
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/raid/pq.h>
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 914ebe98fc21..9e597e1f91a4 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -60,7 +60,7 @@ static inline void LOAD_DATA(int x, u8 *ptr)
typedef struct { u8 _[16 * $#]; } addrtype;
register addrtype *__ptr asm("1") = (addrtype *) ptr;
- asm volatile ("VLM %2,%3,0,%r1"
+ asm volatile ("VLM %2,%3,0,%1"
: : "m" (*__ptr), "a" (__ptr), "i" (x),
"i" (x + $# - 1));
}
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1ef6e25d031c..abc86c6a3177 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
- if (newleft)
- *leftmost = node;
-
while (true) {
/*
* Loop invariant: node is red.
@@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, false, NULL, dummy_rotate);
+ __rb_insert(node, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root,
- NULL, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);
-void rb_insert_color_cached(struct rb_node *node,
- struct rb_root_cached *root, bool leftmost)
-{
- __rb_insert(node, &root->rb_root, leftmost,
- &root->rb_leftmost, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_insert_color_cached);
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_erase_cached);
-
/*
* Augmented rbtree manipulation functions.
*
@@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached);
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, newleft, leftmost, augment_rotate);
+ __rb_insert(node, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);
@@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
}
EXPORT_SYMBOL(rb_replace_node);
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root)
-{
- rb_replace_node(victim, new, &root->rb_root);
-
- if (root->rb_leftmost == victim)
- root->rb_leftmost = new;
-}
-EXPORT_SYMBOL(rb_replace_node_cached);
-
void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
index ba9d7a3329eb..5d4fa68f26cb 100644
--- a/lib/reed_solomon/Makefile
+++ b/lib/reed_solomon/Makefile
@@ -4,4 +4,4 @@
#
obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
-
+obj-$(CONFIG_REED_SOLOMON_TEST) += test_rslib.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 1db74eb098d0..805de84ae83d 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -22,6 +22,7 @@
uint16_t *index_of = rs->index_of;
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
int count = 0;
+ int num_corrected;
uint16_t msk = (uint16_t) rs->nn;
/*
@@ -39,11 +40,21 @@
/* Check length parameter for validity */
pad = nn - nroots - len;
- BUG_ON(pad < 0 || pad >= nn);
+ BUG_ON(pad < 0 || pad >= nn - nroots);
/* Does the caller provide the syndrome ? */
- if (s != NULL)
- goto decode;
+ if (s != NULL) {
+ for (i = 0; i < nroots; i++) {
+ /* The syndrome is in index form,
+ * so nn represents zero
+ */
+ if (s[i] != nn)
+ goto decode;
+ }
+
+ /* syndrome is zero, no errors to correct */
+ return 0;
+ }
/* form the syndromes; i.e., evaluate data(x) at roots of
* g(x) */
@@ -88,8 +99,7 @@
/* if syndrome is zero, data[] is a codeword and there are no
* errors to correct. So return data[] unmodified
*/
- count = 0;
- goto finish;
+ return 0;
}
decode:
@@ -99,9 +109,9 @@
if (no_eras > 0) {
/* Init lambda to be the erasure locator polynomial */
lambda[1] = alpha_to[rs_modnn(rs,
- prim * (nn - 1 - eras_pos[0]))];
+ prim * (nn - 1 - (eras_pos[0] + pad)))];
for (i = 1; i < no_eras; i++) {
- u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+ u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
for (j = i + 1; j > 0; j--) {
tmp = index_of[lambda[j - 1]];
if (tmp != nn) {
@@ -175,6 +185,15 @@
if (lambda[i] != nn)
deg_lambda = i;
}
+
+ if (deg_lambda == 0) {
+ /*
+ * deg(lambda) is zero even though the syndrome is non-zero
+ * => uncorrectable error detected
+ */
+ return -EBADMSG;
+ }
+
/* Find roots of error+erasure locator polynomial by Chien search */
memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
count = 0; /* Number of roots of lambda(x) */
@@ -188,6 +207,12 @@
}
if (q != 0)
continue; /* Not a root */
+
+ if (k < pad) {
+ /* Impossible error location. Uncorrectable error. */
+ return -EBADMSG;
+ }
+
/* store root (index-form) and error location number */
root[count] = i;
loc[count] = k;
@@ -202,8 +227,7 @@
* deg(lambda) unequal to number of roots => uncorrectable
* error detected
*/
- count = -EBADMSG;
- goto finish;
+ return -EBADMSG;
}
/*
* Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
@@ -223,7 +247,9 @@
/*
* Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
* inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+ * Note: we reuse buffer b to store the correction pattern
*/
+ num_corrected = 0;
for (j = count - 1; j >= 0; j--) {
num1 = 0;
for (i = deg_omega; i >= 0; i--) {
@@ -231,6 +257,13 @@
num1 ^= alpha_to[rs_modnn(rs, omega[i] +
i * root[j])];
}
+
+ if (num1 == 0) {
+ /* Nothing to correct at this position */
+ b[j] = 0;
+ continue;
+ }
+
num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
den = 0;
@@ -242,30 +275,52 @@
i * root[j])];
}
}
- /* Apply error to data */
- if (num1 != 0 && loc[j] >= pad) {
- uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] +
- index_of[num2] +
- nn - index_of[den])];
- /* Store the error correction pattern, if a
- * correction buffer is available */
- if (corr) {
- corr[j] = cor;
- } else {
- /* If a data buffer is given and the
- * error is inside the message,
- * correct it */
- if (data && (loc[j] < (nn - nroots)))
- data[loc[j] - pad] ^= cor;
- }
+
+ b[j] = alpha_to[rs_modnn(rs, index_of[num1] +
+ index_of[num2] +
+ nn - index_of[den])];
+ num_corrected++;
+ }
+
+ /*
+ * We compute the syndrome of the 'error' and check that it matches
+ * the syndrome of the received word
+ */
+ for (i = 0; i < nroots; i++) {
+ tmp = 0;
+ for (j = 0; j < count; j++) {
+ if (b[j] == 0)
+ continue;
+
+ k = (fcr + i) * prim * (nn-loc[j]-1);
+ tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)];
}
+
+ if (tmp != alpha_to[s[i]])
+ return -EBADMSG;
}
-finish:
- if (eras_pos != NULL) {
- for (i = 0; i < count; i++)
- eras_pos[i] = loc[i] - pad;
+ /*
+ * Store the error correction pattern, if a
+ * correction buffer is available
+ */
+ if (corr && eras_pos) {
+ j = 0;
+ for (i = 0; i < count; i++) {
+ if (b[i]) {
+ corr[j] = b[i];
+ eras_pos[j++] = loc[i] - pad;
+ }
+ }
+ } else if (data && par) {
+ /* Apply error to data and parity */
+ for (i = 0; i < count; i++) {
+ if (loc[i] < (nn - nroots))
+ data[loc[i] - pad] ^= b[i];
+ else
+ par[loc[i] - pad - len] ^= b[i];
+ }
}
- return count;
+ return num_corrected;
}
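One caller-visible effect of the pad handling deserves a note, sketched with
assumed buffers: erasure positions are now interpreted uniformly as indices
into the shortened block (the decoder adds the pad offset internally), on
input as well as in the positions reported back.

    /* data[3] and data[17] are suspect symbols in the shortened word */
    int eras_pos[2] = { 3, 17 };
    int n = decode_rs8(rsc, data, par, len, NULL, 2, eras_pos, 0, NULL);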
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index e5fdc8b9e856..bbc01bad3053 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -340,7 +340,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* @data: data field of a given type
* @par: received parity data field
* @len: data length
- * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -354,7 +355,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
* decoding, so the caller has to ensure that decoder invocations are
* serialized.
*
- * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
*/
int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
@@ -391,7 +393,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* @data: data field of a given type
* @par: received parity data field
* @len: data length
- * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
* @no_eras: number of erasures
* @eras_pos: position of erasures, can be NULL
* @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -403,7 +406,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
* decoding, so the caller has to ensure that decoder invocations are
* serialized.
*
- * Returns the number of corrected bits or -EBADMSG for uncorrectable errors.
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
*/
int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
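A hedged caller sketch of the clarified contract ("rsc", "data", "par" and
"len" as in the kernel-doc above): passing s == NULL lets the decoder
compute the syndrome itself; a caller-supplied syndrome must be in index
form. The return value counts corrected symbols, parity included.

    int n = decode_rs8(rsc, data, par, len, NULL, 0, NULL, 0, NULL);

    if (n == -EBADMSG)
            pr_err("uncorrectable block\n");
    else
            pr_info("corrected %d symbol(s)\n", n);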
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
new file mode 100644
index 000000000000..4eb29f365ece
--- /dev/null
+++ b/lib/reed_solomon/test_rslib.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tests for Generic Reed Solomon encoder / decoder library
+ *
+ * Written by Ferdinand Blomqvist
+ * Based on previous work by Phil Karn, KA9Q
+ */
+#include <linux/rslib.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+enum verbosity {
+ V_SILENT,
+ V_PROGRESS,
+ V_CSUMMARY
+};
+
+enum method {
+ CORR_BUFFER,
+ CALLER_SYNDROME,
+ IN_PLACE
+};
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg)
+
+__param(int, v, V_PROGRESS, "Verbosity level");
+__param(int, ewsc, 1, "Erasures without symbol corruption");
+__param(int, bc, 1, "Test for correct behaviour beyond error correction capacity");
+
+struct etab {
+ int symsize;
+ int genpoly;
+ int fcs;
+ int prim;
+ int nroots;
+ int ntrials;
+};
+
+/* List of codes to test */
+static struct etab Tab[] = {
+ {2, 0x7, 1, 1, 1, 100000 },
+ {3, 0xb, 1, 1, 2, 100000 },
+ {3, 0xb, 1, 1, 3, 100000 },
+ {3, 0xb, 2, 1, 4, 100000 },
+ {4, 0x13, 1, 1, 4, 10000 },
+ {5, 0x25, 1, 1, 6, 1000 },
+ {6, 0x43, 3, 1, 8, 1000 },
+ {7, 0x89, 1, 1, 14, 500 },
+ {8, 0x11d, 1, 1, 30, 100 },
+ {8, 0x187, 112, 11, 32, 100 },
+ {9, 0x211, 1, 1, 33, 80 },
+ {0, 0, 0, 0, 0, 0},
+};
+
+
+struct estat {
+ int dwrong;
+ int irv;
+ int wepos;
+ int nwords;
+};
+
+struct bcstat {
+ int rfail;
+ int rsuccess;
+ int noncw;
+ int nwords;
+};
+
+struct wspace {
+ uint16_t *c; /* sent codeword */
+ uint16_t *r; /* received word */
+ uint16_t *s; /* syndrome */
+ uint16_t *corr; /* correction buffer */
+ int *errlocs;
+ int *derrlocs;
+};
+
+struct pad {
+ int mult;
+ int shift;
+};
+
+static struct pad pad_coef[] = {
+ { 0, 0 },
+ { 1, 2 },
+ { 1, 1 },
+ { 3, 2 },
+ { 1, 0 },
+};
+
+static void free_ws(struct wspace *ws)
+{
+ if (!ws)
+ return;
+
+ kfree(ws->errlocs);
+ kfree(ws->c);
+ kfree(ws);
+}
+
+static struct wspace *alloc_ws(struct rs_codec *rs)
+{
+ int nroots = rs->nroots;
+ struct wspace *ws;
+ int nn = rs->nn;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return NULL;
+
+ ws->c = kmalloc_array(2 * (nn + nroots),
+ sizeof(uint16_t), GFP_KERNEL);
+ if (!ws->c)
+ goto err;
+
+ ws->r = ws->c + nn;
+ ws->s = ws->r + nn;
+ ws->corr = ws->s + nroots;
+
+ ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
+ if (!ws->errlocs)
+ goto err;
+
+ ws->derrlocs = ws->errlocs + nn;
+ return ws;
+
+err:
+ free_ws(ws);
+ return NULL;
+}
+
+
+/*
+ * Generates a random codeword and stores it in c. Generates random errors and
+ * erasures, and stores the random word with errors in r. Erasure positions are
+ * stored in derrlocs, while errlocs has one of three values in every position:
+ *
+ * 0 if there is no error in this position;
+ * 1 if there is a symbol error in this position;
+ * 2 if there is an erasure without symbol corruption.
+ *
+ * Returns the number of corrupted symbols.
+ */
+static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
+ int len, int errs, int eras)
+{
+ int nroots = rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ int dlen = len - nroots;
+ int nn = rs->codec->nn;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ int errval;
+ int errloc;
+ int i;
+
+ /* Load c with random data and encode */
+ for (i = 0; i < dlen; i++)
+ c[i] = prandom_u32() & nn;
+
+ memset(c + dlen, 0, nroots * sizeof(*c));
+ encode_rs16(rs, c, dlen, c + dlen, 0);
+
+	/* Make a copy and add errors and erasures */
+ memcpy(r, c, len * sizeof(*r));
+ memset(errlocs, 0, len * sizeof(*errlocs));
+ memset(derrlocs, 0, nroots * sizeof(*derrlocs));
+
+ /* Generating random errors */
+ for (i = 0; i < errs; i++) {
+ do {
+ /* Error value must be nonzero */
+ errval = prandom_u32() & nn;
+ } while (errval == 0);
+
+ do {
+ /* Must not choose the same location twice */
+ errloc = prandom_u32() % len;
+ } while (errlocs[errloc] != 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ }
+
+ /* Generating random erasures */
+ for (i = 0; i < eras; i++) {
+ do {
+ /* Must not choose the same location twice */
+ errloc = prandom_u32() % len;
+ } while (errlocs[errloc] != 0);
+
+ derrlocs[i] = errloc;
+
+ if (ewsc && (prandom_u32() & 1)) {
+ /* Erasure with the symbol intact */
+ errlocs[errloc] = 2;
+ } else {
+ /* Erasure with corrupted symbol */
+ do {
+ /* Error value must be nonzero */
+ errval = prandom_u32() & nn;
+ } while (errval == 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ errs++;
+ }
+ }
+
+ return errs;
+}
+
+static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs)
+{
+ int i;
+
+ for (i = 0; i < nerrs; i++)
+ data[errlocs[i]] ^= corr[i];
+}
+
+static void compute_syndrome(struct rs_control *rsc, uint16_t *data,
+ int len, uint16_t *syn)
+{
+ struct rs_codec *rs = rsc->codec;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ int nroots = rs->nroots;
+ int prim = rs->prim;
+ int fcr = rs->fcr;
+ int i, j;
+
+ /* Calculating syndrome */
+ for (i = 0; i < nroots; i++) {
+ syn[i] = data[0];
+ for (j = 1; j < len; j++) {
+ if (syn[i] == 0) {
+ syn[i] = data[j];
+ } else {
+ syn[i] = data[j] ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]]
+ + (fcr + i) * prim)];
+ }
+ }
+ }
+
+ /* Convert to index form */
+ for (i = 0; i < nroots; i++)
+ syn[i] = rs->index_of[syn[i]];
+}
+
+/* Test up to error correction capacity */
+static void test_uc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct estat *stat,
+ struct wspace *ws, int method)
+{
+ int dlen = len - rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ uint16_t *s = ws->s;
+ int derrs, nerrs;
+ int i, j;
+
+ for (j = 0; j < trials; j++) {
+ nerrs = get_rcw_we(rs, ws, len, errs, eras);
+
+ switch (method) {
+ case CORR_BUFFER:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case CALLER_SYNDROME:
+ compute_syndrome(rs, r, len, s);
+ derrs = decode_rs16(rs, NULL, NULL, dlen,
+ s, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case IN_PLACE:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, NULL);
+ break;
+ default:
+ continue;
+ }
+
+ if (derrs != nerrs)
+ stat->irv++;
+
+ if (method != IN_PLACE) {
+ for (i = 0; i < derrs; i++) {
+ if (errlocs[derrlocs[i]] != 1)
+ stat->wepos++;
+ }
+ }
+
+ if (memcmp(r, c, len * sizeof(*r)))
+ stat->dwrong++;
+ }
+ stat->nwords += trials;
+}
+
+static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
+ int len, int trials, int method)
+{
+ static const char * const desc[] = {
+ "Testing correction buffer interface...",
+ "Testing with caller provided syndrome...",
+ "Testing in-place interface..."
+ };
+
+ struct estat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, retval;
+
+ if (v >= V_PROGRESS)
+ pr_info(" %s\n", desc[method]);
+
+ for (errs = 0; errs <= nroots / 2; errs++)
+ for (eras = 0; eras <= nroots - 2 * errs; eras++)
+ test_uc(rs, len, errs, eras, trials, &stat, ws, method);
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" Decodes wrong: %d / %d\n",
+ stat.dwrong, stat.nwords);
+ pr_info(" Wrong return value: %d / %d\n",
+ stat.irv, stat.nwords);
+ if (method != IN_PLACE)
+ pr_info(" Wrong error position: %d\n", stat.wepos);
+ }
+
+ retval = stat.dwrong + stat.wepos + stat.irv;
+ if (retval && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d decoding failures!\n", retval);
+
+ return retval;
+}
+
+static int exercise_rs(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+
+ int retval = 0;
+ int i;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing up to error correction capacity...\n");
+
+ for (i = 0; i <= IN_PLACE; i++)
+ retval |= ex_rs_helper(rs, ws, len, trials, i);
+
+ return retval;
+}
+
+/* Tests for correct behaviour beyond error correction capacity */
+static void test_bc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct bcstat *stat,
+ struct wspace *ws)
+{
+ int nroots = rs->codec->nroots;
+ int dlen = len - nroots;
+ int *derrlocs = ws->derrlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *r = ws->r;
+ int derrs, j;
+
+ for (j = 0; j < trials; j++) {
+ get_rcw_we(rs, ws, len, errs, eras);
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+
+ if (derrs >= 0) {
+ stat->rsuccess++;
+
+ /*
+ * We check that the returned word is actually a
+			 * codeword. The obvious way to do this would be to
+ * compute the syndrome, but we don't want to replicate
+ * that code here. However, all the codes are in
+ * systematic form, and therefore we can encode the
+ * returned word, and see whether the parity changes or
+ * not.
+ */
+ memset(corr, 0, nroots * sizeof(*corr));
+ encode_rs16(rs, r, dlen, corr, 0);
+
+ if (memcmp(r + dlen, corr, nroots * sizeof(*corr)))
+ stat->noncw++;
+ } else {
+ stat->rfail++;
+ }
+ }
+ stat->nwords += trials;
+}
+
+static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+ struct bcstat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, cutoff;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing beyond error correction capacity...\n");
+
+ for (errs = 1; errs <= nroots; errs++) {
+ eras = nroots - 2 * errs + 1;
+ if (eras < 0)
+ eras = 0;
+
+ cutoff = nroots <= len - errs ? nroots : len - errs;
+ for (; eras <= cutoff; eras++)
+ test_bc(rs, len, errs, eras, trials, &stat, ws);
+ }
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" decoder gives up: %d / %d\n",
+ stat.rfail, stat.nwords);
+ pr_info(" decoder returns success: %d / %d\n",
+ stat.rsuccess, stat.nwords);
+ pr_info(" not a codeword: %d / %d\n",
+ stat.noncw, stat.rsuccess);
+ }
+
+ if (stat.noncw && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d silent failures!\n", stat.noncw);
+
+ return stat.noncw;
+}
+
+static int run_exercise(struct etab *e)
+{
+ int nn = (1 << e->symsize) - 1;
+ int kk = nn - e->nroots;
+ struct rs_control *rsc;
+ int retval = -ENOMEM;
+ int max_pad = kk - 1;
+ int prev_pad = -1;
+ struct wspace *ws;
+ int i;
+
+ rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
+ if (!rsc)
+ return retval;
+
+ ws = alloc_ws(rsc->codec);
+ if (!ws)
+ goto err;
+
+ retval = 0;
+ for (i = 0; i < ARRAY_SIZE(pad_coef); i++) {
+ int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;
+ int len = nn - pad;
+
+ if (pad == prev_pad)
+ continue;
+
+ prev_pad = pad;
+ if (v >= V_PROGRESS) {
+ pr_info("Testing (%d,%d)_%d code...\n",
+ len, kk - pad, nn + 1);
+ }
+
+ retval |= exercise_rs(rsc, ws, len, e->ntrials);
+ if (bc)
+ retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
+ }
+
+ free_ws(ws);
+
+err:
+ free_rs(rsc);
+ return retval;
+}
+
+static int __init test_rslib_init(void)
+{
+ int i, fail = 0;
+
+ for (i = 0; Tab[i].symsize != 0 ; i++) {
+ int retval;
+
+ retval = run_exercise(Tab + i);
+ if (retval < 0)
+ return -ENOMEM;
+
+ fail |= retval;
+ }
+
+ if (fail)
+ pr_warn("rslib: test failed\n");
+ else
+ pr_info("rslib: test ok\n");
+
+	return -EAGAIN; /* Failing the init will directly unload the module */
+}
+
+static void __exit test_rslib_exit(void)
+{
+}
+
+module_init(test_rslib_init)
+module_exit(test_rslib_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ferdinand Blomqvist");
+MODULE_DESCRIPTION("Reed-Solomon library test");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 935ec80f213f..bdb7e4cadf05 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Resizable, Scalable, Concurrent Hash Table
*
@@ -8,10 +9,6 @@
* Code partially derived from nft_hash
* Rewritten with rehash code from br_multicast plus single list
* pointer as suggested by Josh Triplett
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/atomic.h>
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 54f57cd117c6..969e5400a615 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,9 +26,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
/*
* First get a stable cleared mask, setting the old mask to 0.
*/
- do {
- mask = sb->map[index].cleared;
- } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
+ mask = xchg(&sb->map[index].cleared, 0);
/*
* Now clear the masked bits in our free word
@@ -516,10 +514,8 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
struct sbq_wait_state *ws = &sbq->ws[wake_index];
if (waitqueue_active(&ws->wait)) {
- int o = atomic_read(&sbq->wake_index);
-
- if (wake_index != o)
- atomic_cmpxchg(&sbq->wake_index, o, wake_index);
+ if (wake_index != atomic_read(&sbq->wake_index))
+ atomic_set(&sbq->wake_index, wake_index);
return ws;
}
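Both forms atomically fetch the old cleared mask while storing zero; the
difference, sketched side by side, is that xchg() is a single unconditional
swap, so it cannot lose a race against a concurrent
sbitmap_deferred_clear_bit() and never needs to retry:

    /* before: compare-and-swap retry loop */
    do {
            mask = sb->map[index].cleared;
    } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

    /* after: one atomic swap */
    mask = xchg(&sb->map[index].cleared, 0);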
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 739dc9fe2c55..c2cf2c311b7d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
*
* Scatterlist handling helpers.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/export.h>
#include <linux/slab.h>
@@ -181,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* __sg_free_table - Free a previously mapped sg table
* @table: The sg table header to use
* @max_ents: The maximum number of entries per single scatterlist
- * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated first chunk
* @free_fn: Free function
*
* Description:
@@ -191,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- bool skip_first_chunk, sg_free_fn *free_fn)
+ unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
if (unlikely(!table->sgl))
return;
@@ -209,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
- if (alloc_size > max_ents) {
- next = sg_chain_ptr(&sgl[max_ents - 1]);
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
@@ -219,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
}
table->orig_nents -= sg_size;
- if (skip_first_chunk)
- skip_first_chunk = false;
+ if (nents_first_chunk)
+ nents_first_chunk = 0;
else
free_fn(sgl, alloc_size);
sgl = next;
+ curr_max_ents = max_ents;
}
table->sgl = NULL;
@@ -246,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated chunk provided by user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
*
@@ -262,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
**/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, struct scatterlist *first_chunk,
- gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
+ unsigned int nents_first_chunk, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+ unsigned prv_max_ents;
memset(table, 0, sizeof(*table));
@@ -281,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
do {
unsigned int sg_size, alloc_size = left;
- if (alloc_size > max_ents) {
- alloc_size = max_ents;
+ if (alloc_size > curr_max_ents) {
+ alloc_size = curr_max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
@@ -316,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
* If this is not the first mapping, chain previous part.
*/
if (prv)
- sg_chain(prv, max_ents, sg);
+ sg_chain(prv, prv_max_ents, sg);
else
table->sgl = sg;
@@ -327,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
sg_mark_end(&sg[sg_size - 1]);
prv = sg;
+ prv_max_ents = curr_max_ents;
+ curr_max_ents = max_ents;
} while (left);
return 0;
@@ -349,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
- NULL, gfp_mask, sg_kmalloc);
+ NULL, 0, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
return ret;
}
@@ -678,17 +686,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
struct scatterlist *sg;
- unsigned long pgoffset;
if (!__sg_page_iter_next(&miter->piter))
return false;
sg = miter->piter.sg;
- pgoffset = miter->piter.sg_pgoffset;
- miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+ miter->__offset &= PAGE_SIZE - 1;
miter->__remaining = sg->offset + sg->length -
- (pgoffset << PAGE_SHIFT) - miter->__offset;
+ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+ miter->__offset;
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
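The sg_miter change handles sg entries whose offset spans whole pages; the
shape of the fix, sketched with shortened names ("pgoff" and "off" stand for
piter.sg_pgoffset and __offset): fold the whole pages hidden in the offset
into the page index and keep only the in-page remainder, so the per-page
offset can never reach PAGE_SIZE.

    pgoff += off >> PAGE_SHIFT;     /* whole pages inside the offset */
    off   &= PAGE_SIZE - 1;         /* in-page remainder */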
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..db29e5c1f790 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
/**
* sg_free_table_chained - Free a previously mapped sg table
* @table: The sg table header to use
- * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ * sg_alloc_table_chained
*
* Description:
* Free an sg table previously allocated and setup with
* sg_alloc_table_chained().
*
+ * @nents_first_chunk has to be the same as the parameter passed
+ * to sg_alloc_table_chained().
+ *
**/
-void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+void sg_free_table_chained(struct sg_table *table,
+ unsigned nents_first_chunk)
{
- if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+ if (table->orig_nents <= nents_first_chunk)
return;
- __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+
+ if (nents_first_chunk == 1)
+ nents_first_chunk = 0;
+
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);
@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in @first_chunk
*
* Description:
* Allocate and chain SGLs in an sg table. If @nents@ is larger than
- * SG_CHUNK_SIZE a chained sg table will be setup.
+ * @nents_first_chunk a chained sg table will be set up. @first_chunk is
+ * ignored if @nents_first_chunk <= 1 because the caller expects the SGL
+ * to point at a non-chained SGL.
*
**/
int sg_alloc_table_chained(struct sg_table *table, int nents,
- struct scatterlist *first_chunk)
+ struct scatterlist *first_chunk, unsigned nents_first_chunk)
{
int ret;
BUG_ON(!nents);
- if (first_chunk) {
- if (nents <= SG_CHUNK_SIZE) {
+ if (first_chunk && nents_first_chunk) {
+ if (nents <= nents_first_chunk) {
table->nents = table->orig_nents = nents;
sg_init_table(table->sgl, nents);
return 0;
}
}
+	/* The caller expects the first SGL to include a real entry */
+ if (nents_first_chunk <= 1) {
+ first_chunk = NULL;
+ nents_first_chunk = 0;
+ }
+
ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
- first_chunk, GFP_ATOMIC, sg_pool_alloc);
+ first_chunk, nents_first_chunk,
+ GFP_ATOMIC, sg_pool_alloc);
if (unlikely(ret))
- sg_free_table_chained(table, (bool)first_chunk);
+ sg_free_table_chained(table, nents_first_chunk);
return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
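Usage sketch for the widened interface ("nents" comes from the request being
mapped): a driver with a small inline SGL now states its size explicitly and
must pass the same value back when freeing.

    struct scatterlist first[4];
    struct sg_table table;

    if (sg_alloc_table_chained(&table, nents, first, ARRAY_SIZE(first)))
            return -ENOMEM;
    /* ... map and use table.sgl ... */
    sg_free_table_chained(&table, ARRAY_SIZE(first));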
diff --git a/lib/sg_split.c b/lib/sg_split.c
index b063410c3593..9982c63d1063 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
*
* Scatterlist splitting helpers.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/scatterlist.h>
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c2..60ba93fc42ce 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+ if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
goto out;
/*
diff --git a/lib/string.c b/lib/string.c
index 6016eb3ac73d..461fb620f85f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -400,6 +400,9 @@ EXPORT_SYMBOL(strncmp);
* strchr - Find the first occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strchr(const char *s, int c)
{
@@ -453,12 +456,18 @@ EXPORT_SYMBOL(strrchr);
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
*/
char *strnchr(const char *s, size_t count, int c)
{
- for (; count-- && *s != '\0'; ++s)
+ while (count--) {
if (*s == (char)c)
return (char *)s;
+ if (*s++ == '\0')
+ break;
+ }
return NULL;
}
EXPORT_SYMBOL(strnchr);
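A behavioural sketch of the documented guarantee: like strchr(), strnchr()
now treats the terminating NUL as part of the string, so it can be found
when it lies within the counted range.

    const char *s = "abc";

    strnchr(s, 4, '\0');    /* returns s + 3, the terminator */
    strnchr(s, 3, '\0');    /* returns NULL: terminator out of range */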
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4403e1924f73..963050c0283e 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -231,35 +231,36 @@ static bool unescape_special(char **src, char **dst)
* @src: source buffer (escaped)
* @dst: destination buffer (unescaped)
* @size: size of the destination buffer (0 to unlimit)
- * @flags: combination of the flags (bitwise OR):
- * %UNESCAPE_SPACE:
+ * @flags: combination of the flags.
+ *
+ * Description:
+ * The function unquotes characters in the given string.
+ *
+ * Because the size of the output will be the same as or less than the size of
+ * the input, the transformation may be performed in place.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will always be NULL-terminated. Source string must be
+ * NULL-terminated as well. The supported flags are::
+ *
+ * UNESCAPE_SPACE:
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
- * %UNESCAPE_OCTAL:
+ * UNESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (1 to 3 digits)
- * %UNESCAPE_HEX:
+ * UNESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
- * %UNESCAPE_SPECIAL:
+ * UNESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
- * %UNESCAPE_ANY:
+ * UNESCAPE_ANY:
* all previous together
*
- * Description:
- * The function unquotes characters in the given string.
- *
- * Because the size of the output will be the same as or less than the size of
- * the input, the transformation may be performed in place.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will always be NULL-terminated. Source string must be
- * NULL-terminated as well.
- *
* Return:
* The amount of the characters processed to the destination buffer excluding
* trailing '\0' is returned.
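As a usage illustration for the flag list above, a hedged sketch using the in-place wrapper from <linux/string_helpers.h> (the buffer contents are made up):

#include <linux/string_helpers.h>

static void unescape_demo(void)
{
	/* Literal backslashes in the buffer: "line1\nline2\x21" */
	char buf[] = "line1\\nline2\\x21";
	int len;

	/* In-place is safe: the output is never longer than the input. */
	len = string_unescape_inplace(buf, UNESCAPE_ANY);
	/* buf is now "line1<newline>line2!" and len == 12. */
}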
@@ -441,7 +442,29 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* @isz: source buffer size
* @dst: destination buffer (escaped)
* @osz: destination buffer size
- * @flags: combination of the flags (bitwise OR):
+ * @flags: combination of the flags
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
+ *
+ * Description:
+ * Escaping a byte buffer is done in several steps, which are applied in
+ * the following sequence.
+ *
+ * 1. The character is matched against the printable class, if asked, and
+ *    in case of a match it passes through to the output.
+ * 2. The character is matched against the @only string and, if it is not
+ *    found there, it goes as-is to the output.
+ * 3. The character is checked against the classes given by @flags.
+ *    %ESCAPE_OCTAL and %ESCAPE_HEX go last since they cover any
+ *    character. Note that they can't be combined, otherwise
+ *    %ESCAPE_HEX will be ignored.
+ *
+ * The caller must provide valid source and destination pointers. Be aware
+ * that the destination buffer will not be NULL-terminated, so the caller
+ * has to append a terminator if one is needed. The supported flags are::
+ *
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
@@ -464,26 +487,6 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
- * @only: NULL-terminated string containing characters used to limit
- * the selected escape class. If characters are included in @only
- * that would not normally be escaped by the classes selected
- * in @flags, they will be copied to @dst unescaped.
- *
- * Description:
- * The process of escaping byte buffer includes several parts. They are applied
- * in the following sequence.
- * 1. The character is matched to the printable class, if asked, and in
- * case of match it passes through to the output.
- * 2. The character is not matched to the one from @only string and thus
- * must go as-is to the output.
- * 3. The character is checked if it falls into the class given by @flags.
- * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
- * character. Note that they actually can't go together, otherwise
- * %ESCAPE_HEX will be ignored.
- *
- * Caller must provide valid source and destination pointers. Be aware that
- * destination buffer will not be NULL-terminated, thus caller have to append
- * it if needs.
*
* Return:
* The total size of the escaped output that would be generated for
@@ -540,6 +543,25 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
}
EXPORT_SYMBOL(string_escape_mem);
+int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
+ size_t osz)
+{
+ char *p = dst;
+ char *end = p + osz;
+
+ while (isz--) {
+ unsigned char c = *src++;
+
+ if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
+ escape_hex(c, &p, end);
+ else
+ escape_passthrough(c, &p, end);
+ }
+
+ return p - dst;
+}
+EXPORT_SYMBOL(string_escape_mem_ascii);
+
/*
* Return an allocated string that has been escaped of special characters
* and double quotes, making it safe to log in quotes.
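To tie the two escape helpers together, a hedged caller sketch; the buffer size and flag choice are illustrative, and the clamp is needed because string_escape_mem() returns the would-be output size, which can exceed @osz:

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void escape_demo(const char *src, size_t len)
{
	char out[128];
	int n;

	/* Escape all non-printables; a NULL @only disables filtering. */
	n = string_escape_mem(src, len, out, sizeof(out) - 1,
			      ESCAPE_ANY_NP, NULL);
	out[min_t(int, n, sizeof(out) - 1)] = '\0';	/* caller-added NUL */

	/* The new helper: hex-escape anything outside printable ASCII,
	 * plus '"' and '\\'.
	 */
	n = string_escape_mem_ascii(src, len, out, sizeof(out) - 1);
	out[min_t(int, n, sizeof(out) - 1)] = '\0';
}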
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
new file mode 100644
index 000000000000..4c40580a99a3
--- /dev/null
+++ b/lib/test_blackhole_dev.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This module tests the blackhole_dev that is created during the
+ * net subsystem initialization. The module performs its test by
+ * injecting an skb into the stack with skb->dev set to the
+ * blackhole_dev and expects the kernel to behave in a sane manner
+ * (in other words, *not crash*)!
+ *
+ * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+
+#include <net/dst.h>
+
+#define SKB_SIZE 256
+#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
+#define TAIL_SIZE 32 /* random tail-room */
+
+#define UDP_PORT 1234
+
+static int __init test_blackholedev_init(void)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ struct udphdr *uh;
+ int data_len;
+ int ret;
+
+ skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Reserve head-room for the headers */
+ skb_reserve(skb, HEAD_SIZE);
+
+ /* Add data to the skb */
+ data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
+ memset(__skb_put(skb, data_len), 0xf, data_len);
+
+ /* Add protocol data */
+ /* (Transport) UDP */
+ uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
+ skb_set_transport_header(skb, 0);
+ uh->source = uh->dest = htons(UDP_PORT);
+ uh->len = htons(data_len);
+ uh->check = 0;
+ /* (Network) IPv6 */
+ ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
+ skb_set_network_header(skb, 0);
+ ip6h->hop_limit = 32;
+ ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->saddr = in6addr_loopback;
+ ip6h->daddr = in6addr_loopback;
+ /* Ether */
+ ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_set_mac_header(skb, 0);
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = blackhole_netdev;
+
+ /* Now attempt to send the packet */
+ ret = dev_queue_xmit(skb);
+
+ switch (ret) {
+ case NET_XMIT_SUCCESS:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
+ break;
+ case NET_XMIT_DROP:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
+ break;
+ case NET_XMIT_CN:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
+ break;
+ default:
+ pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
+ }
+
+ return 0;
+}
+
+static void __exit test_blackholedev_exit(void)
+{
+ pr_warn("test_blackholedev module terminating.\n");
+}
+
+module_init(test_blackholedev_init);
+module_exit(test_blackholedev_exit);
+
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 7de2702621dc..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -1,26 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <a.ryabinin@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -623,6 +620,95 @@ static noinline void __init kasan_strings(void)
strnlen(ptr, 1);
}
+static noinline void __init kasan_bitops(void)
+{
+ /*
+ * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
+ * this way we do not actually corrupt other memory.
+ */
+ long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ /*
+ * The calls below try to access a bit within the allocated memory; the
+ * accesses are nevertheless out-of-bounds, since bitops are defined to
+ * operate on the whole long the bit is in.
+ */
+ pr_info("out-of-bounds in set_bit\n");
+ set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __set_bit\n");
+ __set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit\n");
+ clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit\n");
+ __clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit_unlock\n");
+ clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit_unlock\n");
+ __clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in change_bit\n");
+ change_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __change_bit\n");
+ __change_bit(BITS_PER_LONG, bits);
+
+ /*
+ * The calls below try to access a bit beyond the allocated memory.
+ */
+ pr_info("out-of-bounds in test_and_set_bit\n");
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_set_bit\n");
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_set_bit_lock\n");
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_clear_bit\n");
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_clear_bit\n");
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_change_bit\n");
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_change_bit\n");
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_bit\n");
+ (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+ clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+#endif
+ kfree(bits);
+}
+
+static noinline void __init kmalloc_double_kzfree(void)
+{
+ char *ptr;
+ size_t size = 16;
+
+ pr_info("double-free (kzfree)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kzfree(ptr);
+ kzfree(ptr);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -664,6 +750,8 @@ static int __init kmalloc_tests_init(void)
kasan_memchr();
kasan_memcmp();
kasan_strings();
+ kasan_bitops();
+ kmalloc_double_kzfree();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
new file mode 100644
index 000000000000..62d19f270cad
--- /dev/null
+++ b/lib/test_meminit.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for SL[AOU]B/page initialization at alloc/free time.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define GARBAGE_INT (0x09A7BA9E)
+#define GARBAGE_BYTE (0x9E)
+
+#define REPORT_FAILURES_IN_FN() \
+ do { \
+ if (failures) \
+ pr_info("%s failed %d out of %d times\n", \
+ __func__, failures, num_tests); \
+ else \
+ pr_info("all %d tests in %s passed\n", \
+ num_tests, __func__); \
+ } while (0)
+
+/* Calculate the number of uninitialized bytes in the buffer. */
+static int __init count_nonzero_bytes(void *ptr, size_t size)
+{
+ int i, ret = 0;
+ unsigned char *p = (unsigned char *)ptr;
+
+ for (i = 0; i < size; i++)
+ if (p[i])
+ ret++;
+ return ret;
+}
+
+/* Fill a buffer with garbage, skipping the first |skip| bytes. */
+static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
+{
+ unsigned int *p = (unsigned int *)((char *)ptr + skip);
+ int i = 0;
+
+ WARN_ON(skip > size);
+ size -= skip;
+
+ while (size >= sizeof(*p)) {
+ p[i] = GARBAGE_INT;
+ i++;
+ size -= sizeof(*p);
+ }
+ if (size)
+ memset(&p[i], GARBAGE_BYTE, size);
+}
+
+static void __init fill_with_garbage(void *ptr, size_t size)
+{
+ fill_with_garbage_skip(ptr, size, 0);
+}
+
+static int __init do_alloc_pages_order(int order, int *total_failures)
+{
+ struct page *page;
+ void *buf;
+ size_t size = PAGE_SIZE << order;
+
+ page = alloc_pages(GFP_KERNEL, order);
+ buf = page_address(page);
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+
+ page = alloc_pages(GFP_KERNEL, order);
+ buf = page_address(page);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+ return 1;
+}
+
+/* Test the page allocator by calling alloc_pages with different orders. */
+static int __init test_pages(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i;
+
+ for (i = 0; i < 10; i++)
+ num_tests += do_alloc_pages_order(i, &failures);
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test kmalloc() with given parameters. */
+static int __init do_kmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ fill_with_garbage(buf, size);
+ kfree(buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+ return 1;
+}
+
+/* Test vmalloc() with given parameters. */
+static int __init do_vmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = vmalloc(size);
+ fill_with_garbage(buf, size);
+ vfree(buf);
+
+ buf = vmalloc(size);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+ return 1;
+}
+
+/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
+static int __init test_kvmalloc(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 20; i++) {
+ size = 1 << i;
+ num_tests += do_kmalloc_size(size, &failures);
+ num_tests += do_vmalloc_size(size, &failures);
+ }
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+#define CTOR_BYTES (sizeof(unsigned int))
+#define CTOR_PATTERN (0x41414141)
+/* Initialize the first 4 bytes of the object. */
+static void test_ctor(void *obj)
+{
+ *(unsigned int *)obj = CTOR_PATTERN;
+}
+
+/*
+ * Check the invariants for the buffer allocated from a slab cache.
+ * If the cache has a test constructor, the first 4 bytes of the object must
+ * always remain equal to CTOR_PATTERN.
+ * If the cache isn't an RCU-typesafe one, or if the allocation is done with
+ * __GFP_ZERO, then the object contents must be zeroed after allocation.
+ * If the cache is an RCU-typesafe one, the object contents must never be
+ * zeroed after the first use. This is checked by memcmp() in
+ * do_kmem_cache_size().
+ */
+static bool __init check_buf(void *buf, int size, bool want_ctor,
+ bool want_rcu, bool want_zero)
+{
+ int bytes;
+ bool fail = false;
+
+ bytes = count_nonzero_bytes(buf, size);
+ WARN_ON(want_ctor && want_zero);
+ if (want_zero)
+ return bytes;
+ if (want_ctor) {
+ if (*(unsigned int *)buf != CTOR_PATTERN)
+ fail = 1;
+ } else {
+ if (bytes)
+ fail = !want_rcu;
+ }
+ return fail;
+}
+
+/*
+ * Test kmem_cache with given parameters:
+ * want_ctor - use a constructor;
+ * want_rcu - use SLAB_TYPESAFE_BY_RCU;
+ * want_zero - use __GFP_ZERO.
+ */
+static int __init do_kmem_cache_size(size_t size, bool want_ctor,
+ bool want_rcu, bool want_zero,
+ int *total_failures)
+{
+ struct kmem_cache *c;
+ int iter;
+ bool fail = false;
+ gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
+ void *buf, *buf_copy;
+
+ c = kmem_cache_create("test_cache", size, 1,
+ want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
+ want_ctor ? test_ctor : NULL);
+ for (iter = 0; iter < 10; iter++) {
+ buf = kmem_cache_alloc(c, alloc_mask);
+ /* Check that buf is zeroed, if it must be. */
+ fail = check_buf(buf, size, want_ctor, want_rcu, want_zero);
+ fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
+
+ if (!want_rcu) {
+ kmem_cache_free(c, buf);
+ continue;
+ }
+
+ /*
+ * If this is an RCU cache, use a critical section to ensure we
+ * can touch objects after they're freed.
+ */
+ rcu_read_lock();
+ /*
+ * Copy the buffer to check that it's not wiped on
+ * free().
+ */
+ buf_copy = kmalloc(size, GFP_KERNEL);
+ if (buf_copy)
+ memcpy(buf_copy, buf, size);
+
+ kmem_cache_free(c, buf);
+ /*
+ * Check that |buf| is intact after kmem_cache_free().
+ * |want_zero| is false, because we wrote garbage to
+ * the buffer already.
+ */
+ fail |= check_buf(buf, size, want_ctor, want_rcu,
+ false);
+ if (buf_copy) {
+ fail |= (bool)memcmp(buf, buf_copy, size);
+ kfree(buf_copy);
+ }
+ rcu_read_unlock();
+ }
+ kmem_cache_destroy(c);
+
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Check that the data written to an RCU-allocated object survives
+ * reallocation.
+ */
+static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ void *buf, *buf_contents, *saved_ptr;
+ void **used_objects;
+ int i, iter, maxiter = 1024;
+ bool fail = false;
+
+ c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ saved_ptr = buf;
+ fill_with_garbage(buf, size);
+ buf_contents = kmalloc(size, GFP_KERNEL);
+ if (!buf_contents)
+ goto out;
+ used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
+ if (!used_objects) {
+ kfree(buf_contents);
+ goto out;
+ }
+ memcpy(buf_contents, buf, size);
+ kmem_cache_free(c, buf);
+ /*
+ * Run for a fixed number of iterations. If we never hit saved_ptr,
+ * assume the test passes.
+ */
+ for (iter = 0; iter < maxiter; iter++) {
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ used_objects[iter] = buf;
+ if (buf == saved_ptr) {
+ fail = memcmp(buf_contents, buf, size);
+ for (i = 0; i <= iter; i++)
+ kmem_cache_free(c, used_objects[i]);
+ goto free_out;
+ }
+ }
+
+free_out:
+ kmem_cache_destroy(c);
+ kfree(buf_contents);
+ kfree(used_objects);
+out:
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Test kmem_cache allocation by creating caches of different sizes, with and
+ * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
+ */
+static int __init test_kmemcache(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, flags, size;
+ bool ctor, rcu, zero;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ for (flags = 0; flags < 8; flags++) {
+ ctor = flags & 1;
+ rcu = flags & 2;
+ zero = flags & 4;
+ if (ctor & zero)
+ continue;
+ num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
+ &failures);
+ }
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
+static int __init test_rcu_persistent(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ num_tests += do_kmem_cache_rcu_persistent(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/*
+ * Run the tests. Each test function returns the number of executed tests and
+ * updates |failures| with the number of failed tests.
+ */
+static int __init test_meminit_init(void)
+{
+ int failures = 0, num_tests = 0;
+
+ num_tests += test_pages(&failures);
+ num_tests += test_kvmalloc(&failures);
+ num_tests += test_kmemcache(&failures);
+ num_tests += test_rcu_persistent(&failures);
+
+ if (failures == 0)
+ pr_info("all %d tests passed!\n", num_tests);
+ else
+ pr_info("failures: %d out of %d\n", failures, num_tests);
+
+ return failures ? -EINVAL : 0;
+}
+module_init(test_meminit_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
index fc680562d8b6..7a4b6f6c5473 100644
--- a/lib/test_overflow.c
+++ b/lib/test_overflow.c
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
* Deal with the various forms of allocator arguments. See comments above
* the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
*/
-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
#define alloc000(alloc, arg, sz) alloc(sz)
#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
#define free0(free, arg, ptr) free(ptr)
#define free1(free, arg, ptr) free(arg, ptr)
-/* Wrap around to 8K */
-#define TEST_SIZE (9 << PAGE_SHIFT)
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
static int __init test_ ## func (void *arg) \
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 084fe5a6ac57..c5a6fef7b45d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Resizable, Scalable, Concurrent Hash Table
*
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/**************************************************************************
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index e97dc54b4fdf..2d7d257a430e 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -12,7 +12,7 @@
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
-static char check_buf[MAX_VAR_SIZE];
+static u8 check_buf[MAX_VAR_SIZE];
/* Character array to trigger stack protector in all functions. */
#define VAR_BUFFER 32
@@ -106,9 +106,18 @@ static noinline __init int test_ ## name (void) \
\
/* Fill clone type with zero for per-field init. */ \
memset(&zero, 0x00, sizeof(zero)); \
+ /* Clear entire check buffer for 0xFF overlap test. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
/* Fill stack with 0xFF. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
+ /* Verify all bytes overwritten with 0xFF. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] != 0xFF); \
+ if (sum) { \
+ pr_err(#name ": leaf fill was not 0xFF!?\n"); \
+ return 1; \
+ } \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -126,9 +135,9 @@ static noinline __init int test_ ## name (void) \
return 1; \
} \
\
- /* Look for any set bits in the check region. */ \
- for (i = 0; i < sizeof(check_buf); i++) \
- sum += (check_buf[i] != 0); \
+ /* Look for any bytes still 0xFF in check region. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] == 0xFF); \
\
if (sum == 0) \
pr_info(#name " ok\n"); \
@@ -162,13 +171,13 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
* Keep this buffer around to make sure we've got a \
* stack frame of SOME kind... \
*/ \
- memset(buf, (char)(sp && 0xff), sizeof(buf)); \
+ memset(buf, (char)(sp & 0xff), sizeof(buf)); \
/* Fill variable with 0xFF. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
- (char)((sp && 0xff) | forced_mask), \
+ (char)((sp & 0xff) | forced_mask), \
fill_size); \
} \
\
diff --git a/lib/test_string.c b/lib/test_string.c
index bf8def01ed20..7b31f4a505bf 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -36,7 +36,7 @@ static __init int memset16_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -72,7 +72,7 @@ static __init int memset32_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -108,7 +108,74 @@ static __init int memset64_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+}
+
+static __init int strchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ if (result - test_string != i)
+ return i + 'a';
+ }
+
+ result = strchr(empty_string, '\0');
+ if (result != empty_string)
+ return 0x101;
+
+ result = strchr(empty_string, 'a');
+ if (result)
+ return 0x102;
+
+ result = strchr(test_string, 'z');
+ if (result)
+ return 0x103;
+
+ return 0;
+}
+
+static __init int strnchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ if (!result)
+ continue;
+ return ((i + 'a') << 8) | j;
+ }
+ if (result - test_string != i)
+ return ((i + 'a') << 8) | j;
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ if (result)
+ return 0x10001;
+
+ result = strnchr(empty_string, 1, '\0');
+ if (result != empty_string)
+ return 0x10002;
+
+ result = strnchr(empty_string, 1, 'a');
+ if (result)
+ return 0x10003;
+
+ result = strnchr(NULL, 0, '\0');
+ if (result)
+ return 0x10004;
+
return 0;
}
@@ -131,6 +198,16 @@ static __init int string_selftest_init(void)
if (subtest)
goto fail;
+ test = 4;
+ subtest = strchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 5;
+ subtest = strnchr_selftest();
+ if (subtest)
+ goto fail;
+
pr_info("String selftests succeeded\n");
return 0;
fail:
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 5d4bad8bd96a..9d631a7b6a70 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -38,6 +38,12 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
return xa_store(xa, index, xa_mk_index(index), gfp);
}
+static void xa_insert_index(struct xarray *xa, unsigned long index)
+{
+ XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
+ GFP_KERNEL) != 0);
+}
+
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
u32 id;
@@ -338,6 +344,37 @@ static noinline void check_xa_shrink(struct xarray *xa)
}
}
+static noinline void check_insert(struct xarray *xa)
+{
+ unsigned long i;
+
+ for (i = 0; i < 1024; i++) {
+ xa_insert_index(xa, i);
+ XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
+ xa_erase_index(xa, i);
+ }
+
+ for (i = 10; i < BITS_PER_LONG; i++) {
+ xa_insert_index(xa, 1UL << i);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
+ xa_erase_index(xa, 1UL << i);
+
+ xa_insert_index(xa, (1UL << i) - 1);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
+ xa_erase_index(xa, (1UL << i) - 1);
+ }
+
+ xa_insert_index(xa, ~0UL);
+ XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
+ xa_erase_index(xa, ~0UL);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_cmpxchg(struct xarray *xa)
{
void *FIVE = xa_mk_value(5);
@@ -1527,6 +1564,7 @@ static int xarray_checks(void)
check_xa_mark(&array);
check_xa_shrink(&array);
check_xas_erase(&array);
+ check_insert(&array);
check_cmpxchg(&array);
check_reserve(&array);
check_reserve(&xa0);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index ecc179338094..e7d31735950d 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* UBSAN error reporting functions
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/bitops.h>
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
new file mode 100644
index 000000000000..cc00364bd2c2
--- /dev/null
+++ b/lib/vdso/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config HAVE_GENERIC_VDSO
+ bool
+
+if HAVE_GENERIC_VDSO
+
+config GENERIC_GETTIMEOFDAY
+ bool
+ help
+ This is a generic implementation of the gettimeofday vDSO.
+ Each architecture that enables this feature has to
+ provide the syscall fallback implementations.
+
+config GENERIC_VDSO_32
+ bool
+ depends on GENERIC_GETTIMEOFDAY && !64BIT
+ help
+ This config option helps to avoid possible performance issues
+ on 32 bit only architectures.
+
+config GENERIC_COMPAT_VDSO
+ bool
+ help
+ This config option enables the compat VDSO layer.
+
+config CROSS_COMPILE_COMPAT_VDSO
+ string "32 bit Toolchain prefix for compat vDSO"
+ default ""
+ depends on GENERIC_COMPAT_VDSO
+ help
+ Defines the cross-compiler prefix for compiling the compat vDSO.
+ If the 64 bit compiler (e.g. on x86_64) can already build the
+ vDSO for 32 bit, this parameter does not need to be set.
+
+endif
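For illustration, a hypothetical architecture opt-in; MYARCH is made up, and the selected symbols are the options defined above:

config MYARCH
	bool "Example architecture"
	select HAVE_GENERIC_VDSO
	select GENERIC_GETTIMEOFDAY
	# A 32 bit port would additionally use:
	# select GENERIC_VDSO_32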
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
new file mode 100644
index 000000000000..c415a685d61b
--- /dev/null
+++ b/lib/vdso/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
+
+c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+
+# This cmd checks that the vdso library does not contain absolute relocations.
+# It has to be called after linking the vdso library and takes the library
+# as a parameter.
+#
+# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds
+# to the absolute relocation types printed by "objdump -R" and accepted by the
+# dynamic linker.
+ifndef ARCH_REL_TYPE_ABS
+$(error ARCH_REL_TYPE_ABS is not set)
+endif
+
+quiet_cmd_vdso_check = VDSOCHK $@
+ cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
+ then (echo >&2 "$@: dynamic relocations are not supported"; \
+ rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
new file mode 100644
index 000000000000..2d1c1f241fd9
--- /dev/null
+++ b/lib/vdso/gettimeofday.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic userspace implementations of gettimeofday() and similar.
+ */
+#include <linux/compiler.h>
+#include <linux/math64.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/hrtimer_defs.h>
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+
+/*
+ * The generic vDSO implementation requires that gettimeofday.h
+ * provides:
+ * - __arch_get_vdso_data(): to get the vdso datapage.
+ * - __arch_get_hw_counter(): to get the hw counter based on the
+ * clock_mode.
+ * - gettimeofday_fallback(): fallback for gettimeofday.
+ * - clock_gettime_fallback(): fallback for clock_gettime.
+ * - clock_getres_fallback(): fallback for clock_getres.
+ */
+#ifdef ENABLE_COMPAT_VDSO
+#include <asm/vdso/compat_gettimeofday.h>
+#else
+#include <asm/vdso/gettimeofday.h>
+#endif /* ENABLE_COMPAT_VDSO */
+
+#ifndef vdso_calc_delta
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline
+u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+{
+ return ((cycles - last) & mask) * mult;
+}
+#endif
+
+static int do_hres(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u64 cycles, last, sec, ns;
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vd);
+ cycles = __arch_get_hw_counter(vd->clock_mode);
+ ns = vdso_ts->nsec;
+ last = vd->cycle_last;
+ if (unlikely((s64)cycles < 0))
+ return clock_gettime_fallback(clk, ts);
+
+ ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+ ns >>= vd->shift;
+ sec = vdso_ts->sec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static void do_coarse(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u32 seq;
+
+ do {
+ seq = vdso_read_begin(vd);
+ ts->tv_sec = vdso_ts->sec;
+ ts->tv_nsec = vdso_ts->nsec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+ u32 msk;
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ goto fallback;
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (likely(msk & VDSO_HRES)) {
+ return do_hres(&vd[CS_HRES_COARSE], clock, ts);
+ } else if (msk & VDSO_COARSE) {
+ do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ return 0;
+ } else if (msk & VDSO_RAW) {
+ return do_hres(&vd[CS_RAW], clock, ts);
+ }
+
+fallback:
+ return clock_gettime_fallback(clock, ts);
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ if (res == NULL)
+ goto fallback;
+
+ ret = __cvdso_clock_gettime(clock, &ts);
+
+ if (ret == 0) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
+
+ return ret;
+
+fallback:
+ return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
+}
+
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+
+ if (likely(tv != NULL)) {
+ struct __kernel_timespec ts;
+
+ if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ return gettimeofday_fallback(tv, tz);
+
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
+ }
+
+ if (unlikely(tz != NULL)) {
+ tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
+ tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
+ }
+
+ return 0;
+}
+
+#ifdef VDSO_HAS_TIME
+static __maybe_unused time_t __cvdso_time(time_t *time)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+ time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+
+ if (time)
+ *time = t;
+
+ return t;
+}
+#endif /* VDSO_HAS_TIME */
+
+#ifdef VDSO_HAS_CLOCK_GETRES
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+ u64 ns;
+ u32 msk;
+ u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ goto fallback;
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (msk & VDSO_HRES) {
+ /*
+ * Preserves the behaviour of posix_get_hrtimer_res().
+ */
+ ns = hrtimer_res;
+ } else if (msk & VDSO_COARSE) {
+ /*
+ * Preserves the behaviour of posix_get_coarse_res().
+ */
+ ns = LOW_RES_NSEC;
+ } else if (msk & VDSO_RAW) {
+ /*
+ * Preserves the behaviour of posix_get_hrtimer_res().
+ */
+ ns = hrtimer_res;
+ } else {
+ goto fallback;
+ }
+
+ if (res) {
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
+ }
+
+ return 0;
+
+fallback:
+ return clock_getres_fallback(clock, res);
+}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ if (res == NULL)
+ goto fallback;
+
+ ret = __cvdso_clock_getres(clock, &ts);
+
+ if (ret == 0) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
+
+ return ret;
+
+fallback:
+ return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
+}
+#endif /* VDSO_HAS_CLOCK_GETRES */
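To make the arch contract listed at the top of this file concrete, a hypothetical skeleton of the required asm/vdso/gettimeofday.h; every name not taken from the comment above (datapage symbol, clock-mode constant, counter read, syscall stub) is a placeholder, not any real architecture's code:

#include <vdso/datapage.h>

extern struct vdso_data _vdso_data[CS_BASES];	/* arch-provided datapage */

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	if (clock_mode == MYARCH_CLOCK_NONE)	/* hypothetical constant */
		return U64_MAX;	/* negative as s64, forcing the fallback */
	return myarch_read_counter();		/* hypothetical counter read */
}

static __always_inline
long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
{
	/* A real port issues the raw syscall here. */
	return myarch_syscall2(__NR_clock_gettime, (long)clkid, (long)ts);
}

/* gettimeofday_fallback() and clock_getres_fallback() follow the same
 * syscall-stub pattern and are omitted for brevity.
 */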
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 63937044c57d..b0967cf17137 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -599,7 +599,7 @@ static char *string_nocheck(char *buf, char *end, const char *s,
struct printf_spec spec)
{
int len = 0;
- size_t lim = spec.precision;
+ int lim = spec.precision;
while (lim--) {
char c = *s++;
@@ -1799,7 +1799,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return error_string(buf, end, "(%pC?)", spec);
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
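With the CONFIG_COMMON_CLK=n branch now routed through ptr_to_id(), %pC degrades to a hashed pointer instead of the literal "(%pC?)" error string; a trivial hedged caller:

#include <linux/clk.h>
#include <linux/printk.h>

static void clk_print_demo(struct clk *clk)
{
	/* Prints the clock name with COMMON_CLK, and a hashed
	 * pointer (as with %p) without it.
	 */
	pr_info("clock: %pC\n", clk);
}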
diff --git a/lib/xarray.c b/lib/xarray.c
index 6be3acbb861f..446b956c9188 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -298,6 +298,8 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
xas_destroy(xas);
return false;
}
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
if (!xas->xa_alloc)
return false;
@@ -325,6 +327,8 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
xas_destroy(xas);
return false;
}
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
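The hunks in this file make xarray node allocations inherit __GFP_ACCOUNT whenever the array was created with XA_FLAGS_ACCOUNT; a hedged usage sketch (the array and helper are made up):

#include <linux/xarray.h>

/* Nodes of this xarray are charged to the current memory cgroup. */
static DEFINE_XARRAY_FLAGS(acct_xa, XA_FLAGS_ACCOUNT);

static int store_demo(unsigned long index, void *entry)
{
	/* Internal node allocations now pick up __GFP_ACCOUNT too. */
	return xa_err(xa_store(&acct_xa, index, entry, GFP_KERNEL));
}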
@@ -358,8 +362,12 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
if (node) {
xas->xa_alloc = NULL;
} else {
- node = kmem_cache_alloc(radix_tree_node_cachep,
- GFP_NOWAIT | __GFP_NOWARN);
+ gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
if (!node) {
xas_set_err(xas, -ENOMEM);
return NULL;