Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig            |    9
-rw-r--r--   lib/Kconfig.debug      |    7
-rw-r--r--   lib/Makefile           |    5
-rw-r--r--   lib/bitrev.c           |   17
-rw-r--r--   lib/checksum.c         |   12
-rw-r--r--   lib/dynamic_debug.c    |    2
-rw-r--r--   lib/iovec.c            |   87
-rw-r--r--   lib/mpi/mpi-cmp.c      |   10
-rw-r--r--   lib/mpi/mpi-internal.h |    2
-rw-r--r--   lib/rhashtable.c       | 1170
-rw-r--r--   lib/test_rhashtable.c  |  227
11 files changed, 1012 insertions, 536 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 54cf309a92a5..cd177caf3876 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -13,6 +13,15 @@ config RAID6_PQ config BITREVERSE tristate +config HAVE_ARCH_BITREVERSE + boolean + default n + depends on BITREVERSE + help + This option provides an config for the architecture which have instruction + can do bitreverse operation, we use the hardware instruction if the architecture + have this capability. + config RATIONAL boolean diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5f2ce616c046..e5ea3ab856bf 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -636,7 +636,7 @@ config DEBUG_STACKOVERFLOW depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW ---help--- Say Y here if you want to check for overflows of kernel, IRQ - and exception stacks (if your archicture uses them). This + and exception stacks (if your architecture uses them). This option will show detailed messages if free stack space drops below a certain limit. @@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST tristate "torture tests for RCU" depends on DEBUG_KERNEL select TORTURE_TEST + select SRCU default n help This option provides a kernel module that runs torture tests @@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT config RCU_CPU_STALL_INFO bool "Print additional diagnostics on RCU CPU stall" depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL - default n + default y help For each stalled CPU that is aware of the current RCU grace period, print out additional per-CPU diagnostic information @@ -1586,7 +1587,7 @@ config TEST_KSTRTOX tristate "Test kstrto*() family of functions at runtime" config TEST_RHASHTABLE - bool "Perform selftest on resizable hash table" + tristate "Perform selftest on resizable hash table" default n help Enable this option to test the rhashtable functions at boot. 
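Editor's note: the HAVE_ARCH_BITREVERSE option added above only does something useful together with an architecture header that supplies hardware-backed bit-reverse helpers, which the generic <linux/bitrev.h> is then expected to use instead of the byte_rev_table path this commit compiles out. A minimal sketch of such a header follows; the __arch_bitrev{8,16,32}() names and the single-instruction rbit body are assumptions modeled on the 32-bit ARM support this option was introduced for, and are not part of this diff.

/* Hypothetical arch/<arch>/include/asm/bitrev.h for an architecture that
 * selects CONFIG_HAVE_ARCH_BITREVERSE (illustrative sketch only). */
#ifndef __ASM_BITREV_H
#define __ASM_BITREV_H

#include <linux/types.h>

static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
{
	/* e.g. ARMv7 "rbit": reverse all 32 bits in one instruction */
	__asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
	return x;
}

static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
{
	return __arch_bitrev32((u32)x) >> 16;
}

static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
{
	return __arch_bitrev32((u32)x) >> 24;
}

#endif /* __ASM_BITREV_H */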
diff --git a/lib/Makefile b/lib/Makefile index 3c3b30b9e020..25c061f77df7 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -4,7 +4,7 @@ ifdef CONFIG_FUNCTION_TRACER ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) +KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ @@ -24,7 +24,7 @@ obj-y += lockref.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o @@ -35,6 +35,7 @@ obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o +obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/bitrev.c b/lib/bitrev.c index 3956203456d4..40ffda94cc5d 100644 --- a/lib/bitrev.c +++ b/lib/bitrev.c @@ -1,3 +1,4 @@ +#ifndef CONFIG_HAVE_ARCH_BITREVERSE #include <linux/types.h> #include <linux/module.h> #include <linux/bitrev.h> @@ -42,18 +43,4 @@ const u8 byte_rev_table[256] = { }; EXPORT_SYMBOL_GPL(byte_rev_table); -u16 bitrev16(u16 x) -{ - return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); -} -EXPORT_SYMBOL(bitrev16); - -/** - * bitrev32 - reverse the order of bits in a u32 value - * @x: value to be bit-reversed - */ -u32 bitrev32(u32 x) -{ - return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); -} -EXPORT_SYMBOL(bitrev32); +#endif /* CONFIG_HAVE_ARCH_BITREVERSE */ diff --git a/lib/checksum.c b/lib/checksum.c index 129775eb6de6..8b39e86dbab5 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum) EXPORT_SYMBOL(csum_partial_copy); #ifndef csum_tcpudp_nofold +static inline u32 from64to32(u64 x) +{ + /* add up 32-bit and 32-bit for 32+c bit */ + x = (x & 0xffffffff) + (x >> 32); + /* add up carry.. */ + x = (x & 0xffffffff) + (x >> 32); + return (u32)x; +} + __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, @@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, #else s += (proto + len) << 8; #endif - s += (s >> 32); - return (__force __wsum)s; + return (__force __wsum)from64to32(s); } EXPORT_SYMBOL(csum_tcpudp_nofold); #endif diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 527799d44476..d8f3d3150603 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -641,7 +641,7 @@ static __init int ddebug_setup_query(char *str) __setup("ddebug_query=", ddebug_setup_query); /* - * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the + * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the * command text from userspace, parses and executes it. */ #define USER_BUF_PAGE 4096 diff --git a/lib/iovec.c b/lib/iovec.c deleted file mode 100644 index 2d99cb4a5006..000000000000 --- a/lib/iovec.c +++ /dev/null @@ -1,87 +0,0 @@ -#include <linux/uaccess.h> -#include <linux/export.h> -#include <linux/uio.h> - -/* - * Copy iovec to kernel. Returns -EFAULT on error. - * - * Note: this modifies the original iovec. 
- */ - -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, len, iov->iov_len); - if (copy_from_user(kdata, iov->iov_base, copy)) - return -EFAULT; - len -= copy; - kdata += copy; - iov->iov_base += copy; - iov->iov_len -= copy; - } - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_fromiovec); - -/* - * Copy kernel to iovec. Returns -EFAULT on error. - */ - -int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, - int offset, int len) -{ - int copy; - for (; len > 0; ++iov) { - /* Skip over the finished iovecs */ - if (unlikely(offset >= iov->iov_len)) { - offset -= iov->iov_len; - continue; - } - copy = min_t(unsigned int, iov->iov_len - offset, len); - if (copy_to_user(iov->iov_base + offset, kdata, copy)) - return -EFAULT; - offset = 0; - kdata += copy; - len -= copy; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_toiovecend); - -/* - * Copy iovec to kernel. Returns -EFAULT on error. - */ - -int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, - int offset, int len) -{ - /* No data? Done! */ - if (len == 0) - return 0; - - /* Skip over the finished iovecs */ - while (offset >= iov->iov_len) { - offset -= iov->iov_len; - iov++; - } - - while (len > 0) { - u8 __user *base = iov->iov_base + offset; - int copy = min_t(unsigned int, len, iov->iov_len - offset); - - offset = 0; - if (copy_from_user(kdata, base, copy)) - return -EFAULT; - len -= copy; - kdata += copy; - iov++; - } - - return 0; -} -EXPORT_SYMBOL(memcpy_fromiovecend); diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c index 1871e7b61ca0..d25e9e96c310 100644 --- a/lib/mpi/mpi-cmp.c +++ b/lib/mpi/mpi-cmp.c @@ -57,14 +57,12 @@ int mpi_cmp(MPI u, MPI v) if (usize != vsize && !u->sign && !v->sign) return usize - vsize; if (usize != vsize && u->sign && v->sign) - return vsize + usize; + return vsize - usize; if (!usize) return 0; cmp = mpihelp_cmp(u->d, v->d, usize); - if (!cmp) - return 0; - if ((cmp < 0 ? 1 : 0) == (u->sign ? 
1 : 0)) - return 1; - return -1; + if (u->sign) + return -cmp; + return cmp; } EXPORT_SYMBOL_GPL(mpi_cmp); diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index 60cf765628e9..c65dd1bff45a 100644 --- a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h @@ -84,7 +84,7 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) do { \ mpi_size_t _i; \ for (_i = 0; _i < (n); _i++) \ - (d)[_i] = (d)[_i]; \ + (d)[_i] = (s)[_i]; \ } while (0) #define MPN_COPY_DECR(d, s, n) \ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c3c723e902b..9cc4c4a90d00 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -1,7 +1,7 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * * Based on the following paper: @@ -23,94 +23,203 @@ #include <linux/jhash.h> #include <linux/random.h> #include <linux/rhashtable.h> +#include <linux/err.h> #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4UL +#define BUCKET_LOCKS_PER_CPU 128UL -#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) +/* Base bits plus 1 bit for nulls marker */ +#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) -#ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +enum { + RHT_LOCK_NORMAL, + RHT_LOCK_NESTED, +}; + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contains + * entries which link to the same bucket of the old table during resizing. + * This allows to simplify the locking as locking the bucket in both + * tables during resize always guarantee protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. + */ +static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) { - return ht->p.mutex_is_held(ht->p.parent); + return &tbl->locks[hash & tbl->locks_mask]; } -EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); -#endif static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) { return (void *) he - ht->p.head_offset; } -static u32 __hashfn(const struct rhashtable *ht, const void *key, - u32 len, u32 hsize) +static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) +{ + return hash & (tbl->size - 1); +} + +static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) { - u32 h; + u32 hash; - h = ht->p.hashfn(key, len, ht->p.hash_rnd); + if (unlikely(!ht->p.key_len)) + hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + else + hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, + ht->p.hash_rnd); - return h & (hsize - 1); + return hash >> HASH_RESERVED_SPACE; } -/** - * rhashtable_hashfn - compute hash for key of given length - * @ht: hash table to compute for - * @key: pointer to key - * @len: length of key - * - * Computes the hash value using the hash function provided in the 'hashfn' - * of struct rhashtable_params. The returned value is guaranteed to be - * smaller than the number of buckets in the hash table. 
- */ -u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len) +static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) { - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE; +} - return __hashfn(ht, key, len, tbl->size); +static u32 head_hashfn(const struct rhashtable *ht, + const struct bucket_table *tbl, + const struct rhash_head *he) +{ + return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); } -EXPORT_SYMBOL_GPL(rhashtable_hashfn); -static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize) +#ifdef CONFIG_PROVE_LOCKING +static void debug_dump_buckets(const struct rhashtable *ht, + const struct bucket_table *tbl) { - if (unlikely(!ht->p.key_len)) { - u32 h; + struct rhash_head *he; + unsigned int i, hash; - h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + for (i = 0; i < tbl->size; i++) { + pr_warn(" [Bucket %d] ", i); + rht_for_each_rcu(he, tbl, i) { + hash = head_hashfn(ht, tbl, he); + pr_cont("[hash = %#x, lock = %p] ", + hash, bucket_lock(tbl, hash)); + } + pr_cont("\n"); + } + +} + +static void debug_dump_table(struct rhashtable *ht, + const struct bucket_table *tbl, + unsigned int hash) +{ + struct bucket_table *old_tbl, *future_tbl; + + pr_emerg("BUG: lock for hash %#x in table %p not held\n", + hash, tbl); - return h & (hsize - 1); + rcu_read_lock(); + future_tbl = rht_dereference_rcu(ht->future_tbl, ht); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + if (future_tbl != old_tbl) { + pr_warn("Future table %p (size: %zd)\n", + future_tbl, future_tbl->size); + debug_dump_buckets(ht, future_tbl); } - return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize); + pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size); + debug_dump_buckets(ht, old_tbl); + + rcu_read_unlock(); } -/** - * rhashtable_obj_hashfn - compute hash for hashed object - * @ht: hash table to compute for - * @ptr: pointer to hashed object - * - * Computes the hash value using the hash function `hashfn` respectively - * 'obj_hashfn' depending on whether the hash table is set up to work with - * a fixed length key. The returned value is guaranteed to be smaller than - * the number of buckets in the hash table. - */ -u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr) +#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) +#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \ + do { \ + if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \ + debug_dump_table(HT, TBL, HASH); \ + BUG(); \ + } \ + } while (0) + +int lockdep_rht_mutex_is_held(struct rhashtable *ht) { - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; +} +EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); - return obj_hashfn(ht, ptr, tbl->size); +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) +{ + spinlock_t *lock = bucket_lock(tbl, hash); + + return (debug_locks) ? 
lockdep_is_held(lock) : 1; } -EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn); +EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); +#else +#define ASSERT_RHT_MUTEX(HT) +#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) +#endif -static u32 head_hashfn(const struct rhashtable *ht, - const struct rhash_head *he, u32 hsize) + +static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) { - return obj_hashfn(ht, rht_obj(ht, he), hsize); + struct rhash_head __rcu **pprev; + + for (pprev = &tbl->buckets[n]; + !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); + pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) + ; + + return pprev; } -static struct bucket_table *bucket_table_alloc(size_t nbuckets) +static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) +{ + unsigned int i, size; +#if defined(CONFIG_PROVE_LOCKING) + unsigned int nr_pcpus = 2; +#else + unsigned int nr_pcpus = num_possible_cpus(); +#endif + + nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); + size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); + + /* Never allocate more than 0.5 locks per bucket */ + size = min_t(unsigned int, size, tbl->size >> 1); + + if (sizeof(spinlock_t) != 0) { +#ifdef CONFIG_NUMA + if (size * sizeof(spinlock_t) > PAGE_SIZE) + tbl->locks = vmalloc(size * sizeof(spinlock_t)); + else +#endif + tbl->locks = kmalloc_array(size, sizeof(spinlock_t), + GFP_KERNEL); + if (!tbl->locks) + return -ENOMEM; + for (i = 0; i < size; i++) + spin_lock_init(&tbl->locks[i]); + } + tbl->locks_mask = size - 1; + + return 0; +} + +static void bucket_table_free(const struct bucket_table *tbl) +{ + if (tbl) + kvfree(tbl->locks); + + kvfree(tbl); +} + +static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets) { struct bucket_table *tbl; size_t size; + int i; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); @@ -122,12 +231,15 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets) tbl->size = nbuckets; - return tbl; -} + if (alloc_bucket_locks(ht, tbl) < 0) { + bucket_table_free(tbl); + return NULL; + } -static void bucket_table_free(const struct bucket_table *tbl) -{ - kvfree(tbl); + for (i = 0; i < nbuckets; i++) + INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + + return tbl; } /** @@ -138,7 +250,8 @@ static void bucket_table_free(const struct bucket_table *tbl) bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ - return ht->nelems > (new_size / 4 * 3); + return atomic_read(&ht->nelems) > (new_size / 4 * 3) && + (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); } EXPORT_SYMBOL_GPL(rht_grow_above_75); @@ -150,41 +263,75 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ - return ht->nelems < (new_size * 3 / 10); + return atomic_read(&ht->nelems) < (new_size * 3 / 10) && + (atomic_read(&ht->shift) > ht->p.min_shift); } EXPORT_SYMBOL_GPL(rht_shrink_below_30); -static void hashtable_chain_unzip(const struct rhashtable *ht, +static void lock_buckets(struct bucket_table *new_tbl, + struct bucket_table *old_tbl, unsigned int hash) + __acquires(old_bucket_lock) +{ + spin_lock_bh(bucket_lock(old_tbl, hash)); + if (new_tbl != old_tbl) + spin_lock_bh_nested(bucket_lock(new_tbl, hash), + RHT_LOCK_NESTED); +} + +static void unlock_buckets(struct bucket_table *new_tbl, + struct bucket_table *old_tbl, unsigned int hash) + 
__releases(old_bucket_lock) +{ + if (new_tbl != old_tbl) + spin_unlock_bh(bucket_lock(new_tbl, hash)); + spin_unlock_bh(bucket_lock(old_tbl, hash)); +} + +/** + * Unlink entries on bucket which hash to different bucket. + * + * Returns true if no more work needs to be performed on the bucket. + */ +static bool hashtable_chain_unzip(struct rhashtable *ht, const struct bucket_table *new_tbl, - struct bucket_table *old_tbl, size_t n) + struct bucket_table *old_tbl, + size_t old_hash) { struct rhash_head *he, *p, *next; - unsigned int h; + unsigned int new_hash, new_hash2; + + ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash); /* Old bucket empty, no work needed. */ - p = rht_dereference(old_tbl->buckets[n], ht); - if (!p) - return; + p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, + old_hash); + if (rht_is_a_nulls(p)) + return false; + + new_hash = head_hashfn(ht, new_tbl, p); + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); /* Advance the old bucket pointer one or more times until it * reaches a node that doesn't hash to the same bucket as the * previous node p. Call the previous node p; */ - h = head_hashfn(ht, p, new_tbl->size); - rht_for_each(he, p->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) != h) + rht_for_each_continue(he, p->next, old_tbl, old_hash) { + new_hash2 = head_hashfn(ht, new_tbl, he); + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2); + + if (new_hash != new_hash2) break; p = he; } - RCU_INIT_POINTER(old_tbl->buckets[n], p->next); + rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. */ - next = NULL; - if (he) { - rht_for_each(he, he->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) == h) { + INIT_RHT_NULLS_HEAD(next, ht, old_hash); + if (!rht_is_a_nulls(he)) { + rht_for_each_continue(he, he->next, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { next = he; break; } @@ -194,7 +341,20 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Set p's next pointer to that subsequent node pointer, * bypassing the nodes which do not hash to p's bucket */ - RCU_INIT_POINTER(p->next, next); + rcu_assign_pointer(p->next, next); + + p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, + old_hash); + + return !rht_is_a_nulls(p); +} + +static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, + unsigned int new_hash, struct rhash_head *entry) +{ + ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); + + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); } /** @@ -207,53 +367,57 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. * - * The caller must ensure that no concurrent table mutations take place. - * It is however valid to have concurrent lookups if they are RCU protected. + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. 
*/ int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; - unsigned int i, h; - bool complete; + unsigned int new_hash, old_hash; + bool complete = false; ASSERT_RHT_MUTEX(ht); - if (ht->p.max_shift && ht->shift >= ht->p.max_shift) - return 0; - - new_tbl = bucket_table_alloc(old_tbl->size * 2); + new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; - ht->shift++; + atomic_inc(&ht->shift); + + /* Make insertions go into the new, empty table right away. Deletions + * and lookups will be attempted in both tables until we synchronize. + * The synchronize_rcu() guarantees for the new table to be picked up + * so no new additions go into the old table while we relink. + */ + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); - /* For each new bucket, search the corresponding old bucket - * for the first entry that hashes to the new bucket, and - * link the new bucket to that entry. Since all the entries - * which will end up in the new bucket appear in the same - * old bucket, this constructs an entirely valid new hash - * table, but with multiple buckets "zipped" together into a - * single imprecise chain. + /* For each new bucket, search the corresponding old bucket for the + * first entry that hashes to the new bucket, and link the end of + * newly formed bucket chain (containing entries added to future + * table) to that entry. Since all the entries which will end up in + * the new bucket appear in the same old bucket, this constructs an + * entirely valid new hash table, but with multiple buckets + * "zipped" together into a single imprecise chain. */ - for (i = 0; i < new_tbl->size; i++) { - h = i & (old_tbl->size - 1); - rht_for_each(he, old_tbl->buckets[h], ht) { - if (head_hashfn(ht, he, new_tbl->size) == i) { - RCU_INIT_POINTER(new_tbl->buckets[i], he); + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + old_hash = rht_bucket_index(old_tbl, new_hash); + lock_buckets(new_tbl, old_tbl, new_hash); + rht_for_each(he, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { + link_old_to_new(ht, new_tbl, new_hash, he); break; } } + unlock_buckets(new_tbl, old_tbl, new_hash); } - /* Publish the new table pointer. Lookups may now traverse - * the new table, but they will not benefit from any - * additional efficiency until later steps unzip the buckets. - */ - rcu_assign_pointer(ht->tbl, new_tbl); - /* Unzip interleaved hash chains */ - do { + while (!complete && !ht->being_destroyed) { /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. @@ -265,12 +429,19 @@ int rhashtable_expand(struct rhashtable *ht) * table): ... */ complete = true; - for (i = 0; i < old_tbl->size; i++) { - hashtable_chain_unzip(ht, new_tbl, old_tbl, i); - if (old_tbl->buckets[i] != NULL) + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + lock_buckets(new_tbl, old_tbl, old_hash); + + if (hashtable_chain_unzip(ht, new_tbl, old_tbl, + old_hash)) complete = false; + + unlock_buckets(new_tbl, old_tbl, old_hash); } - } while (!complete); + } + + rcu_assign_pointer(ht->tbl, new_tbl); + synchronize_rcu(); bucket_table_free(old_tbl); return 0; @@ -284,45 +455,51 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. 
* + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. + * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. */ int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); - struct rhash_head __rcu **pprev; - unsigned int i; + struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); + unsigned int new_hash; ASSERT_RHT_MUTEX(ht); - if (ht->shift <= ht->p.min_shift) - return 0; - - ntbl = bucket_table_alloc(tbl->size / 2); - if (ntbl == NULL) + new_tbl = bucket_table_alloc(ht, tbl->size / 2); + if (new_tbl == NULL) return -ENOMEM; - ht->shift--; + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); - /* Link each bucket in the new table to the first bucket - * in the old table that contains entries which will hash - * to the new bucket. + /* Link the first entry in the old bucket to the end of the + * bucket in the new table. As entries are concurrently being + * added to the new table, lock down the new bucket. As we + * always divide the size in half when shrinking, each bucket + * in the new table maps to exactly two buckets in the old + * table. */ - for (i = 0; i < ntbl->size; i++) { - ntbl->buckets[i] = tbl->buckets[i]; + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + lock_buckets(new_tbl, tbl, new_hash); - /* Link each bucket in the new table to the first bucket - * in the old table that contains entries which will hash - * to the new bucket. - */ - for (pprev = &ntbl->buckets[i]; *pprev != NULL; - pprev = &rht_dereference(*pprev, ht)->next) - ; - RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash]); + ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash + new_tbl->size]); + + unlock_buckets(new_tbl, tbl, new_hash); } /* Publish the new, valid hash table */ - rcu_assign_pointer(ht->tbl, ntbl); + rcu_assign_pointer(ht->tbl, new_tbl); + atomic_dec(&ht->shift); /* Wait for readers. No new readers will have references to the * old hash table. @@ -335,59 +512,99 @@ int rhashtable_shrink(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_shrink); -/** - * rhashtable_insert - insert object into hash hash table - * @ht: hash table - * @obj: pointer to hash head inside object - * - * Will automatically grow the table via rhashtable_expand() if the the - * grow_decision function specified at rhashtable_init() returns true. - * - * The caller must ensure that no concurrent table mutations occur. It is - * however valid to have concurrent lookups if they are RCU protected. 
- */ -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) +static void rht_deferred_worker(struct work_struct *work) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); - u32 hash; + struct rhashtable *ht; + struct bucket_table *tbl; + struct rhashtable_walker *walker; - ASSERT_RHT_MUTEX(ht); + ht = container_of(work, struct rhashtable, run_work); + mutex_lock(&ht->mutex); + if (ht->being_destroyed) + goto unlock; - hash = head_hashfn(ht, obj, tbl->size); - RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); - rcu_assign_pointer(tbl->buckets[hash], obj); - ht->nelems++; + tbl = rht_dereference(ht->tbl, ht); + + list_for_each_entry(walker, &ht->walkers, list) + walker->resize = true; if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) rhashtable_expand(ht); + else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) + rhashtable_shrink(ht); + +unlock: + mutex_unlock(&ht->mutex); +} + +static void rhashtable_wakeup_worker(struct rhashtable *ht) +{ + struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + size_t size = tbl->size; + + /* Only adjust the table if no resizing is currently in progress. */ + if (tbl == new_tbl && + ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || + (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) + schedule_work(&ht->run_work); +} + +static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, + struct bucket_table *tbl, u32 hash) +{ + struct rhash_head *head; + + hash = rht_bucket_index(tbl, hash); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + + ASSERT_BUCKET_LOCK(ht, tbl, hash); + + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(obj->next, ht, hash); + else + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + + rhashtable_wakeup_worker(ht); } -EXPORT_SYMBOL_GPL(rhashtable_insert); /** - * rhashtable_remove_pprev - remove object from hash table given previous element + * rhashtable_insert - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object - * @pprev: pointer to previous element * - * Identical to rhashtable_remove() but caller is alreayd aware of the element - * in front of the element to be deleted. This is in particular useful for - * deletion when combined with walking or lookup. + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). 
*/ -void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev) +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *tbl, *old_tbl; + unsigned hash; - ASSERT_RHT_MUTEX(ht); + rcu_read_lock(); - RCU_INIT_POINTER(*pprev, obj->next); - ht->nelems--; + tbl = rht_dereference_rcu(ht->future_tbl, ht); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); - if (ht->p.shrink_decision && - ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht); + lock_buckets(tbl, old_tbl, hash); + __rhashtable_insert(ht, obj, tbl, hash); + unlock_buckets(tbl, old_tbl, hash); + + rcu_read_unlock(); } -EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); +EXPORT_SYMBOL_GPL(rhashtable_insert); /** * rhashtable_remove - remove object from hash table @@ -398,7 +615,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); * walk the bucket chain upon removal. The removal operation is thus * considerable slow if the hash table is not correctly sized. * - * Will automatically shrink the table via rhashtable_expand() if the the + * Will automatically shrink the table via rhashtable_expand() if the * shrink_decision function specified at rhashtable_init() returns true. * * The caller must ensure that no concurrent table mutations occur. It is @@ -406,30 +623,87 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); */ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *tbl, *new_tbl, *old_tbl; struct rhash_head __rcu **pprev; - struct rhash_head *he; - u32 h; + struct rhash_head *he, *he2; + unsigned int hash, new_hash; + bool ret = false; - ASSERT_RHT_MUTEX(ht); - - h = head_hashfn(ht, obj, tbl->size); - - pprev = &tbl->buckets[h]; - rht_for_each(he, tbl->buckets[h], ht) { + rcu_read_lock(); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); + + lock_buckets(new_tbl, old_tbl, new_hash); +restart: + hash = rht_bucket_index(tbl, new_hash); + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { if (he != obj) { pprev = &he->next; continue; } - rhashtable_remove_pprev(ht, he, pprev); - return true; + ASSERT_BUCKET_LOCK(ht, tbl, hash); + + if (old_tbl->size > new_tbl->size && tbl == old_tbl && + !rht_is_a_nulls(obj->next) && + head_hashfn(ht, tbl, obj->next) != hash) { + rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); + } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) { + rht_for_each_continue(he2, obj->next, tbl, hash) { + if (head_hashfn(ht, tbl, he2) == hash) { + rcu_assign_pointer(*pprev, he2); + goto found; + } + } + + rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); + } else { + rcu_assign_pointer(*pprev, obj->next); + } + +found: + ret = true; + break; + } + + /* The entry may be linked in either 'tbl', 'future_tbl', or both. + * 'future_tbl' only exists for a short period of time during + * resizing. Thus traversing both is fine and the added cost is + * very rare. 
+ */ + if (tbl != old_tbl) { + tbl = old_tbl; + goto restart; + } + + unlock_buckets(new_tbl, old_tbl, new_hash); + + if (ret) { + atomic_dec(&ht->nelems); + rhashtable_wakeup_worker(ht); } - return false; + rcu_read_unlock(); + + return ret; } EXPORT_SYMBOL_GPL(rhashtable_remove); +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +static bool rhashtable_compare(void *ptr, void *arg) +{ + struct rhashtable_compare_arg *x = arg; + struct rhashtable *ht = x->ht; + + return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len); +} + /** * rhashtable_lookup - lookup key in hash table * @ht: hash table @@ -439,65 +713,313 @@ EXPORT_SYMBOL_GPL(rhashtable_remove); * for a entry with an identical key. The first matching entry is returned. * * This lookup function may only be used for fixed key hash table (key_len - * paramter set). It will BUG() if used inappropriately. + * parameter set). It will BUG() if used inappropriately. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. */ -void *rhashtable_lookup(const struct rhashtable *ht, const void *key) +void *rhashtable_lookup(struct rhashtable *ht, const void *key) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - struct rhash_head *he; - u32 h; + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; BUG_ON(!ht->p.key_len); - h = __hashfn(ht, key, ht->p.key_len, tbl->size); - rht_for_each_rcu(he, tbl->buckets[h], ht) { - if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, - ht->p.key_len)) - continue; - return (void *) he - ht->p.head_offset; - } - - return NULL; + return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg); } EXPORT_SYMBOL_GPL(rhashtable_lookup); /** * rhashtable_lookup_compare - search hash table with compare function * @ht: hash table - * @hash: hash value of desired entry + * @key: the pointer to the key * @compare: compare function, must return true on match * @arg: argument passed on to compare function * * Traverses the bucket chain behind the provided hash value and calls the * specified compare function for each entry. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. * * Returns the first entry on which the compare function returned true. 
*/ -void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, +void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + const struct bucket_table *tbl, *old_tbl; struct rhash_head *he; + u32 hash; - if (unlikely(hash >= tbl->size)) - return NULL; + rcu_read_lock(); - rht_for_each_rcu(he, tbl->buckets[hash], ht) { + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); + hash = key_hashfn(ht, key, ht->p.key_len); +restart: + rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { if (!compare(rht_obj(ht, he), arg)) continue; - return (void *) he - ht->p.head_offset; + rcu_read_unlock(); + return rht_obj(ht, he); + } + + if (unlikely(tbl != old_tbl)) { + tbl = old_tbl; + goto restart; } + rcu_read_unlock(); return NULL; } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); +/** + * rhashtable_lookup_insert - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = rht_obj(ht, obj) + ht->p.key_offset, + }; + + BUG_ON(!ht->p.key_len); + + return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare, + &arg); +} +EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); + +/** + * rhashtable_lookup_compare_insert - search and insert object to hash table + * with compare function + * @ht: hash table + * @obj: pointer to hash head inside object + * @compare: compare function, must return true on match + * @arg: argument passed on to compare function + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). 
+ */ +bool rhashtable_lookup_compare_insert(struct rhashtable *ht, + struct rhash_head *obj, + bool (*compare)(void *, void *), + void *arg) +{ + struct bucket_table *new_tbl, *old_tbl; + u32 new_hash; + bool success = true; + + BUG_ON(!ht->p.key_len); + + rcu_read_lock(); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); + + lock_buckets(new_tbl, old_tbl, new_hash); + + if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, + compare, arg)) { + success = false; + goto exit; + } + + __rhashtable_insert(ht, obj, new_tbl, new_hash); + +exit: + unlock_buckets(new_tbl, old_tbl, new_hash); + rcu_read_unlock(); + + return success; +} +EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); + +/** + * rhashtable_walk_init - Initialise an iterator + * @ht: Table to walk over + * @iter: Hash table Iterator + * + * This function prepares a hash table walk. + * + * Note that if you restart a walk after rhashtable_walk_stop you + * may see the same object twice. Also, you may miss objects if + * there are removals in between rhashtable_walk_stop and the next + * call to rhashtable_walk_start. + * + * For a completely stable walk you should construct your own data + * structure outside the hash table. + * + * This function may sleep so you must not call it from interrupt + * context or with spin locks held. + * + * You must call rhashtable_walk_exit if this function returns + * successfully. + */ +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) +{ + iter->ht = ht; + iter->p = NULL; + iter->slot = 0; + iter->skip = 0; + + iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); + if (!iter->walker) + return -ENOMEM; + + mutex_lock(&ht->mutex); + list_add(&iter->walker->list, &ht->walkers); + mutex_unlock(&ht->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_init); + +/** + * rhashtable_walk_exit - Free an iterator + * @iter: Hash table Iterator + * + * This function frees resources allocated by rhashtable_walk_init. + */ +void rhashtable_walk_exit(struct rhashtable_iter *iter) +{ + mutex_lock(&iter->ht->mutex); + list_del(&iter->walker->list); + mutex_unlock(&iter->ht->mutex); + kfree(iter->walker); +} +EXPORT_SYMBOL_GPL(rhashtable_walk_exit); + +/** + * rhashtable_walk_start - Start a hash table walk + * @iter: Hash table iterator + * + * Start a hash table walk. Note that we take the RCU lock in all + * cases including when we return an error. So you must always call + * rhashtable_walk_stop to clean up. + * + * Returns zero if successful. + * + * Returns -EAGAIN if resize event occured. Note that the iterator + * will rewind back to the beginning and you may use it immediately + * by calling rhashtable_walk_next. + */ +int rhashtable_walk_start(struct rhashtable_iter *iter) +{ + rcu_read_lock(); + + if (iter->walker->resize) { + iter->slot = 0; + iter->skip = 0; + iter->walker->resize = false; + return -EAGAIN; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_start); + +/** + * rhashtable_walk_next - Return the next object and advance the iterator + * @iter: Hash table iterator + * + * Note that you must call rhashtable_walk_stop when you are finished + * with the walk. + * + * Returns the next object or NULL when the end of the table is reached. + * + * Returns -EAGAIN if resize event occured. Note that the iterator + * will rewind back to the beginning and you may continue to use it. 
+ */ +void *rhashtable_walk_next(struct rhashtable_iter *iter) +{ + const struct bucket_table *tbl; + struct rhashtable *ht = iter->ht; + struct rhash_head *p = iter->p; + void *obj = NULL; + + tbl = rht_dereference_rcu(ht->tbl, ht); + + if (p) { + p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); + goto next; + } + + for (; iter->slot < tbl->size; iter->slot++) { + int skip = iter->skip; + + rht_for_each_rcu(p, tbl, iter->slot) { + if (!skip) + break; + skip--; + } + +next: + if (!rht_is_a_nulls(p)) { + iter->skip++; + iter->p = p; + obj = rht_obj(ht, p); + goto out; + } + + iter->skip = 0; + } + + iter->p = NULL; + +out: + if (iter->walker->resize) { + iter->p = NULL; + iter->slot = 0; + iter->skip = 0; + iter->walker->resize = false; + return ERR_PTR(-EAGAIN); + } + + return obj; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_next); + +/** + * rhashtable_walk_stop - Finish a hash table walk + * @iter: Hash table iterator + * + * Finish a hash table walk. + */ +void rhashtable_walk_stop(struct rhashtable_iter *iter) +{ + rcu_read_unlock(); + iter->p = NULL; +} +EXPORT_SYMBOL_GPL(rhashtable_walk_stop); + static size_t rounded_hashtable_size(struct rhashtable_params *params) { return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), @@ -525,9 +1047,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif + * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@ -547,9 +1067,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif * }; */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) @@ -563,24 +1080,40 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) (!params->key_len && !params->obj_hashfn)) return -EINVAL; + if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) + return -EINVAL; + params->min_shift = max_t(size_t, params->min_shift, ilog2(HASH_MIN_SIZE)); if (params->nelem_hint) size = rounded_hashtable_size(params); - tbl = bucket_table_alloc(size); + memset(ht, 0, sizeof(*ht)); + mutex_init(&ht->mutex); + memcpy(&ht->p, params, sizeof(*params)); + INIT_LIST_HEAD(&ht->walkers); + + if (params->locks_mul) + ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); + else + ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; + + tbl = bucket_table_alloc(ht, size); if (tbl == NULL) return -ENOMEM; - memset(ht, 0, sizeof(*ht)); - ht->shift = ilog2(tbl->size); - memcpy(&ht->p, params, sizeof(*params)); + atomic_set(&ht->nelems, 0); + atomic_set(&ht->shift, ilog2(tbl->size)); RCU_INIT_POINTER(ht->tbl, tbl); + RCU_INIT_POINTER(ht->future_tbl, tbl); if (!ht->p.hash_rnd) get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); + if (ht->p.grow_decision || ht->p.shrink_decision) + INIT_WORK(&ht->run_work, rht_deferred_worker); + return 0; } EXPORT_SYMBOL_GPL(rhashtable_init); @@ -593,216 +1126,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init); * has to make sure that no resizing may happen by unpublishing the hashtable * and waiting for the quiescent cycle before releasing the bucket array. 
*/ -void rhashtable_destroy(const struct rhashtable *ht) +void rhashtable_destroy(struct rhashtable *ht) { - bucket_table_free(ht->tbl); -} -EXPORT_SYMBOL_GPL(rhashtable_destroy); - -/************************************************************************** - * Self Test - **************************************************************************/ - -#ifdef CONFIG_TEST_RHASHTABLE + ht->being_destroyed = true; -#define TEST_HT_SIZE 8 -#define TEST_ENTRIES 2048 -#define TEST_PTR ((void *) 0xdeadbeef) -#define TEST_NEXPANDS 4 + if (ht->p.grow_decision || ht->p.shrink_decision) + cancel_work_sync(&ht->run_work); -#ifdef CONFIG_PROVE_LOCKING -static int test_mutex_is_held(void *parent) -{ - return 1; + mutex_lock(&ht->mutex); + bucket_table_free(rht_dereference(ht->tbl, ht)); + mutex_unlock(&ht->mutex); } -#endif - -struct test_obj { - void *ptr; - int value; - struct rhash_head node; -}; - -static int __init test_rht_lookup(struct rhashtable *ht) -{ - unsigned int i; - - for (i = 0; i < TEST_ENTRIES * 2; i++) { - struct test_obj *obj; - bool expected = !(i % 2); - u32 key = i; - - obj = rhashtable_lookup(ht, &key); - - if (expected && !obj) { - pr_warn("Test failed: Could not find key %u\n", key); - return -ENOENT; - } else if (!expected && obj) { - pr_warn("Test failed: Unexpected entry found for key %u\n", - key); - return -EEXIST; - } else if (expected && obj) { - if (obj->ptr != TEST_PTR || obj->value != i) { - pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", - obj->ptr, TEST_PTR, obj->value, i); - return -EINVAL; - } - } - } - - return 0; -} - -static void test_bucket_stats(struct rhashtable *ht, bool quiet) -{ - unsigned int cnt, rcu_cnt, i, total = 0; - struct test_obj *obj; - struct bucket_table *tbl; - - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) { - rcu_cnt = cnt = 0; - - if (!quiet) - pr_info(" [%#4x/%zu]", i, tbl->size); - - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) { - cnt++; - total++; - if (!quiet) - pr_cont(" [%p],", obj); - } - - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) - rcu_cnt++; - - if (rcu_cnt != cnt) - pr_warn("Test failed: Chain count mismach %d != %d", - cnt, rcu_cnt); - - if (!quiet) - pr_cont("\n [%#x] first element: %p, chain length: %u\n", - i, tbl->buckets[i], cnt); - } - - pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", - total, ht->nelems, TEST_ENTRIES); - - if (total != ht->nelems || total != TEST_ENTRIES) - pr_warn("Test failed: Total count mismatch ^^^"); -} - -static int __init test_rhashtable(struct rhashtable *ht) -{ - struct bucket_table *tbl; - struct test_obj *obj, *next; - int err; - unsigned int i; - - /* - * Insertion Test: - * Insert TEST_ENTRIES into table with all keys even numbers - */ - pr_info(" Adding %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { - struct test_obj *obj; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) { - err = -ENOMEM; - goto error; - } - - obj->ptr = TEST_PTR; - obj->value = i * 2; - - rhashtable_insert(ht, &obj->node); - } - - rcu_read_lock(); - test_bucket_stats(ht, true); - test_rht_lookup(ht); - rcu_read_unlock(); - - for (i = 0; i < TEST_NEXPANDS; i++) { - pr_info(" Table expansion iteration %u...\n", i); - rhashtable_expand(ht); - - rcu_read_lock(); - pr_info(" Verifying lookups...\n"); - test_rht_lookup(ht); - rcu_read_unlock(); - } - - for (i = 0; i < TEST_NEXPANDS; i++) { - pr_info(" Table shrinkage iteration %u...\n", i); - rhashtable_shrink(ht); - - rcu_read_lock(); - pr_info(" Verifying 
lookups...\n"); - test_rht_lookup(ht); - rcu_read_unlock(); - } - - rcu_read_lock(); - test_bucket_stats(ht, true); - rcu_read_unlock(); - - pr_info(" Deleting %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { - u32 key = i * 2; - - obj = rhashtable_lookup(ht, &key); - BUG_ON(!obj); - - rhashtable_remove(ht, &obj->node); - kfree(obj); - } - - return 0; - -error: - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) - rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node) - kfree(obj); - - return err; -} - -static int __init test_rht_init(void) -{ - struct rhashtable ht; - struct rhashtable_params params = { - .nelem_hint = TEST_HT_SIZE, - .head_offset = offsetof(struct test_obj, node), - .key_offset = offsetof(struct test_obj, value), - .key_len = sizeof(int), - .hashfn = jhash, -#ifdef CONFIG_PROVE_LOCKING - .mutex_is_held = &test_mutex_is_held, -#endif - .grow_decision = rht_grow_above_75, - .shrink_decision = rht_shrink_below_30, - }; - int err; - - pr_info("Running resizable hashtable tests...\n"); - - err = rhashtable_init(&ht, ¶ms); - if (err < 0) { - pr_warn("Test failed: Unable to initialize hashtable: %d\n", - err); - return err; - } - - err = test_rhashtable(&ht); - - rhashtable_destroy(&ht); - - return err; -} - -subsys_initcall(test_rht_init); - -#endif /* CONFIG_TEST_RHASHTABLE */ +EXPORT_SYMBOL_GPL(rhashtable_destroy); diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c new file mode 100644 index 000000000000..1dfeba73fc74 --- /dev/null +++ b/lib/test_rhashtable.c @@ -0,0 +1,227 @@ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> + * + * Based on the following paper: + * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf + * + * Code partially derived from nft_hash + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/************************************************************************** + * Self Test + **************************************************************************/ + +#include <linux/init.h> +#include <linux/jhash.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rcupdate.h> +#include <linux/rhashtable.h> +#include <linux/slab.h> + + +#define TEST_HT_SIZE 8 +#define TEST_ENTRIES 2048 +#define TEST_PTR ((void *) 0xdeadbeef) +#define TEST_NEXPANDS 4 + +struct test_obj { + void *ptr; + int value; + struct rhash_head node; +}; + +static int __init test_rht_lookup(struct rhashtable *ht) +{ + unsigned int i; + + for (i = 0; i < TEST_ENTRIES * 2; i++) { + struct test_obj *obj; + bool expected = !(i % 2); + u32 key = i; + + obj = rhashtable_lookup(ht, &key); + + if (expected && !obj) { + pr_warn("Test failed: Could not find key %u\n", key); + return -ENOENT; + } else if (!expected && obj) { + pr_warn("Test failed: Unexpected entry found for key %u\n", + key); + return -EEXIST; + } else if (expected && obj) { + if (obj->ptr != TEST_PTR || obj->value != i) { + pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", + obj->ptr, TEST_PTR, obj->value, i); + return -EINVAL; + } + } + } + + return 0; +} + +static void test_bucket_stats(struct rhashtable *ht, bool quiet) +{ + unsigned int cnt, rcu_cnt, i, total = 0; + struct rhash_head *pos; + struct test_obj *obj; + struct bucket_table *tbl; + + tbl = rht_dereference_rcu(ht->tbl, ht); + for (i = 0; i < tbl->size; i++) { + rcu_cnt = cnt = 0; + + if (!quiet) + pr_info(" [%#4x/%zu]", i, tbl->size); + + rht_for_each_entry_rcu(obj, pos, tbl, i, node) { + cnt++; + total++; + if (!quiet) + pr_cont(" [%p],", obj); + } + + rht_for_each_entry_rcu(obj, pos, tbl, i, node) + rcu_cnt++; + + if (rcu_cnt != cnt) + pr_warn("Test failed: Chain count mismach %d != %d", + cnt, rcu_cnt); + + if (!quiet) + pr_cont("\n [%#x] first element: %p, chain length: %u\n", + i, tbl->buckets[i], cnt); + } + + pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", + total, atomic_read(&ht->nelems), TEST_ENTRIES); + + if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) + pr_warn("Test failed: Total count mismatch ^^^"); +} + +static int __init test_rhashtable(struct rhashtable *ht) +{ + struct bucket_table *tbl; + struct test_obj *obj; + struct rhash_head *pos, *next; + int err; + unsigned int i; + + /* + * Insertion Test: + * Insert TEST_ENTRIES into table with all keys even numbers + */ + pr_info(" Adding %d keys\n", TEST_ENTRIES); + for (i = 0; i < TEST_ENTRIES; i++) { + struct test_obj *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) { + err = -ENOMEM; + goto error; + } + + obj->ptr = TEST_PTR; + obj->value = i * 2; + + rhashtable_insert(ht, &obj->node); + } + + rcu_read_lock(); + test_bucket_stats(ht, true); + test_rht_lookup(ht); + rcu_read_unlock(); + + for (i = 0; i < TEST_NEXPANDS; i++) { + pr_info(" Table expansion iteration %u...\n", i); + mutex_lock(&ht->mutex); + rhashtable_expand(ht); + mutex_unlock(&ht->mutex); + + rcu_read_lock(); + pr_info(" Verifying lookups...\n"); + test_rht_lookup(ht); + rcu_read_unlock(); + } + + for (i = 0; i < TEST_NEXPANDS; i++) { + pr_info(" Table shrinkage iteration %u...\n", i); + mutex_lock(&ht->mutex); + rhashtable_shrink(ht); + mutex_unlock(&ht->mutex); + + rcu_read_lock(); + pr_info(" Verifying lookups...\n"); + test_rht_lookup(ht); + rcu_read_unlock(); + } + + rcu_read_lock(); + test_bucket_stats(ht, true); + rcu_read_unlock(); + + pr_info(" Deleting %d 
keys\n", TEST_ENTRIES); + for (i = 0; i < TEST_ENTRIES; i++) { + u32 key = i * 2; + + obj = rhashtable_lookup(ht, &key); + BUG_ON(!obj); + + rhashtable_remove(ht, &obj->node); + kfree(obj); + } + + return 0; + +error: + tbl = rht_dereference_rcu(ht->tbl, ht); + for (i = 0; i < tbl->size; i++) + rht_for_each_entry_safe(obj, pos, next, tbl, i, node) + kfree(obj); + + return err; +} + +static int __init test_rht_init(void) +{ + struct rhashtable ht; + struct rhashtable_params params = { + .nelem_hint = TEST_HT_SIZE, + .head_offset = offsetof(struct test_obj, node), + .key_offset = offsetof(struct test_obj, value), + .key_len = sizeof(int), + .hashfn = jhash, + .nulls_base = (3U << RHT_BASE_SHIFT), + .grow_decision = rht_grow_above_75, + .shrink_decision = rht_shrink_below_30, + }; + int err; + + pr_info("Running resizable hashtable tests...\n"); + + err = rhashtable_init(&ht, ¶ms); + if (err < 0) { + pr_warn("Test failed: Unable to initialize hashtable: %d\n", + err); + return err; + } + + err = test_rhashtable(&ht); + + rhashtable_destroy(&ht); + + return err; +} + +module_init(test_rht_init); + +MODULE_LICENSE("GPL v2"); |
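Editor's note: for reference, a minimal caller of the reworked rhashtable API, mirroring what the new lib/test_rhashtable.c exercises. Per-bucket locking makes rhashtable_insert() usable without holding ht->mutex (including from atomic context), lookups are RCU protected, and grow/shrink past the watermarks is deferred to the worker. All names here (my_obj, my_ht, my_params, my_store, my_find) are illustrative and not part of the patch.

#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct my_obj {
	int			key;
	struct rhash_head	node;
};

static struct rhashtable my_ht;

static struct rhashtable_params my_params = {
	.nelem_hint	= 16,
	.head_offset	= offsetof(struct my_obj, node),
	.key_offset	= offsetof(struct my_obj, key),
	.key_len	= sizeof(int),
	.hashfn		= jhash,
	.nulls_base	= (1U << RHT_BASE_SHIFT),
	.grow_decision	= rht_grow_above_75,
	.shrink_decision = rht_shrink_below_30,
};

static int my_store(int key)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	obj->key = key;

	/* Takes only the per-bucket spinlock; crossing the 75% watermark
	 * schedules the deferred resize worker rather than resizing here. */
	rhashtable_insert(&my_ht, &obj->node);
	return 0;
}

static struct my_obj *my_find(int key)
{
	/* May run concurrently with insertions, removals and a resize;
	 * the caller must hold rcu_read_lock() for the returned object
	 * to remain valid while it is used. */
	return rhashtable_lookup(&my_ht, &key);
}

static int __init my_init(void)
{
	return rhashtable_init(&my_ht, &my_params);
}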
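Editor's note: the new walker interface (rhashtable_walk_init/start/next/stop/exit) is intended for unstable traversals that tolerate a concurrent resize; -EAGAIN only means the iterator was rewound to the beginning. A sketch of the calling pattern implied by the kernel-doc above, with an illustrative walk_table() name, kept independent of any particular object layout:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

static void walk_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	if (rhashtable_walk_init(ht, &iter))	/* may sleep, allocates walker */
		return;

	/* walk_start() takes the RCU read lock even when it returns -EAGAIN,
	 * which only signals that a resize rewound the iterator; the walk
	 * can proceed immediately either way. */
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter))) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resized; iterator rewound */
			break;
		}
		pr_info("object at %p\n", obj);
	}

	rhashtable_walk_stop(&iter);	/* drops the RCU read lock */
	rhashtable_walk_exit(&iter);
}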