Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    |  1
-rw-r--r--  lib/Kconfig.kasan    |  8
-rw-r--r--  lib/cpumask.c        | 74
-rw-r--r--  lib/find_last_bit.c  | 41
-rw-r--r--  lib/rhashtable.c     | 11
-rw-r--r--  lib/string.c         |  2
6 files changed, 45 insertions(+), 92 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 17670573dda8..ba2b0c87e65b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
 	int "How much to slow down RCU grace-period initialization"
 	range 0 5
 	default 3
+	depends on RCU_TORTURE_TEST_SLOW_INIT
 	help
 	  This option specifies the number of jiffies to wait between
 	  each rcu_node structure initialization.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..777eda7d1ab4 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
-	  This is strictly debugging feature. It consumes about 1/8
-	  of available memory and brings about ~x3 performance slowdown.
+	  This is strictly a debugging feature and it requires a gcc version
+	  of 4.9.2 or later. Detection of out of bounds accesses to stack or
+	  global variables requires gcc 5.0 or later.
+	  This feature consumes about 1/8 of available memory and brings about
+	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE,
 	  and add slub_debug=U to boot cmdline.
 
@@ -40,6 +43,7 @@ config KASAN_INLINE
 	  memory accesses. This is faster than outline (in some workloads
 	  it gives about x2 boost over outline instrumentation), but
 	  make kernel's .text size much bigger.
+	  This requires a gcc version of 5.0 or later.
 
 endchoice
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5f627084f2e9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
 		}
 	}
-
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
-
-	ret = -EAGAIN;
-
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40c6a6e..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* find_last_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2008 IBM Corporation
- * Written by Rusty Russell <rusty@rustcorp.com.au>
- * (Inspired by David Howell's find_next_bit implementation)
- *
- * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
- * size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-
-#ifndef find_last_bit
-
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (size) {
-		unsigned long val = BITMAP_LAST_WORD_MASK(size);
-		unsigned long idx = (size-1) / BITS_PER_LONG;
-
-		do {
-			val &= addr[idx];
-			if (val)
-				return idx * BITS_PER_LONG + __fls(val);
-
-			val = ~0ul;
-		} while (idx--);
-	}
-	return size;
-}
-EXPORT_SYMBOL(find_last_bit);
-
-#endif
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b28df4019ade..4396434e4715 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 	if (key && rhashtable_lookup_fast(ht, key, ht->p))
 		goto exit;
 
+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto exit;
+
 	err = -EAGAIN;
 	if (rhashtable_check_elasticity(ht, tbl, hash) ||
 	    rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->max_size)
 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+	if (params->insecure_max_entries)
+		ht->p.insecure_max_entries =
+			rounddown_pow_of_two(params->insecure_max_entries);
+	else
+		ht->p.insecure_max_entries = ht->p.max_size * 2;
+
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
 	/* The maximum (not average) chain length grows with the
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
 void memzero_explicit(void *s, size_t count)
 {
 	memset(s, 0, count);
-	barrier();
+	barrier_data(s);
 }
 EXPORT_SYMBOL(memzero_explicit);
 
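
A note on the cpumask.c change above: cpumask_local_spread() returns a CPU number directly instead of filling a caller-supplied mask the way cpumask_set_cpu_local_first() did, so it allocates nothing and can no longer fail with -ENOMEM or -EAGAIN. A minimal sketch of the typical caller pattern, spreading per-queue IRQ affinity hints with NUMA-local CPUs first (the driver function, queue count and irqs[] array are hypothetical; only cpumask_local_spread() itself comes from this diff):

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical driver setup: one affinity hint per queue,
 * CPUs local to the device's NUMA node handed out first.
 */
static void example_set_affinity_hints(struct device *dev,
				       unsigned int nqueues, int *irqs)
{
	unsigned int q;

	for (q = 0; q < nqueues; q++) {
		unsigned int cpu = cpumask_local_spread(q, dev_to_node(dev));

		/* A hint only; userspace/irqbalance may still move it. */
		irq_set_affinity_hint(irqs[q], cpumask_of(cpu));
	}
}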
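On the find_last_bit.c removal: the helper itself does not go away, presumably only this standalone fallback file does as part of consolidating the lib/ bit-search implementations. For reference, its contract: it returns the index of the highest set bit below size, or size when no bit is set. A small illustration of that contract (the wrapper function is made up):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>

static void example_find_last_bit(void)
{
	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);
	set_bit(3, map);
	set_bit(42, map);

	/* Highest set bit wins: 42, not 3. */
	WARN_ON(find_last_bit(map, 64) != 42);

	/* No bits set: the size argument itself is returned. */
	bitmap_zero(map, 64);
	WARN_ON(find_last_bit(map, 64) != 64);
}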
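The rhashtable.c hunks add a hard cap on the total entry count: rhashtable_insert_slow() now fails with -E2BIG once the table holds insecure_max_entries elements, and rhashtable_init() defaults the cap to max_size * 2 when the caller leaves it at zero. A sketch of how a user might set the new parameter (struct example_obj and the sizes chosen are illustrative, not from this diff):

#include <linux/rhashtable.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.head_offset          = offsetof(struct example_obj, node),
	.key_offset           = offsetof(struct example_obj, key),
	.key_len              = sizeof(u32),
	.max_size             = 1024,
	/* Hard cap on entries; 0 would default to max_size * 2. */
	.insecure_max_entries = 2048,
};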
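Finally, the string.c change: barrier() only clobbers memory, so a sufficiently aggressive compiler can still prove that a dying stack buffer is never read again and delete the preceding memset() as a dead store. barrier_data(s) additionally passes the pointer into the inline asm as an input operand, forcing the compiler to assume the zeroed bytes are read and to keep the store. This is the call pattern memzero_explicit() exists for (the two helpers below are hypothetical stand-ins for real key-derivation and cipher steps):

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-ins; any code that puts secrets on the stack fits. */
static void example_derive_key(u8 *key, size_t len) { }
static void example_encrypt(u8 *out, const u8 *in, size_t len,
			    const u8 *key) { }

static void example_use_key(u8 *out, const u8 *in, size_t len)
{
	u8 key[32];

	example_derive_key(key, sizeof(key));
	example_encrypt(out, in, len, key);

	/*
	 * A plain memset(key, 0, ...) is a dead store once key goes
	 * out of scope; memzero_explicit() guarantees the scrub stays.
	 */
	memzero_explicit(key, sizeof(key));
}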