author    | Thomas Graf <tgraf@suug.ch>           | 2015-03-24 14:18:17 +0100
committer | David S. Miller <davem@davemloft.net> | 2015-03-24 17:48:39 -0400
commit    | 299e5c32a37a6bca8175db177117467bd1ce970a (patch)
tree      | e625a03790b29449ad4992db6a3250305ff5831f /lib/rhashtable.c
parent    | 58be8a583d8d316448bafa5926414cfb83c02dec (diff)
rhashtable: Use 'unsigned int' consistently
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r-- | lib/rhashtable.c | 18
1 file changed, 10 insertions, 8 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 8514f7c5f029..50abe4fec4b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -153,7 +153,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 	return new_tbl;
 }
 
-static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
+static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl = rhashtable_last_table(ht,
@@ -162,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 	int err = -ENOENT;
 	struct rhash_head *head, *next, *entry;
 	spinlock_t *new_bucket_lock;
-	unsigned new_hash;
+	unsigned int new_hash;
 
 	rht_for_each(entry, old_tbl, old_hash) {
 		err = 0;
@@ -199,7 +199,8 @@ out:
 	return err;
 }
 
-static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
+static void rhashtable_rehash_chain(struct rhashtable *ht,
+				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	spinlock_t *old_bucket_lock;
@@ -244,7 +245,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl;
 	struct rhashtable_walker *walker;
-	unsigned old_hash;
+	unsigned int old_hash;
 
 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
 	if (!new_tbl)
@@ -324,11 +325,12 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	unsigned int size;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
+	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
 
@@ -379,9 +381,9 @@ unlock:
 
 static bool rhashtable_check_elasticity(struct rhashtable *ht,
 					struct bucket_table *tbl,
-					unsigned hash)
+					unsigned int hash)
 {
-	unsigned elasticity = ht->elasticity;
+	unsigned int elasticity = ht->elasticity;
 	struct rhash_head *head;
 
 	rht_for_each(head, tbl, hash)
@@ -431,7 +433,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 			   struct bucket_table *tbl)
 {
 	struct rhash_head *head;
-	unsigned hash;
+	unsigned int hash;
 	int err;
 
 	tbl = rhashtable_last_table(ht, tbl);
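For readers skimming the diff: the change is one of spelling, not semantics. Bare `unsigned` and `unsigned int` name the same type; the commit simply converts lib/rhashtable.c to the spelled-out form that the kernel's checkpatch.pl script prefers. A minimal standalone C sketch of that equivalence follows; the variable names are hypothetical and do not appear in the patch.

```c
#include <stdio.h>

int main(void)
{
	/* Both declarations denote the same type; "unsigned" is merely
	 * shorthand for "unsigned int". Kernel style prefers the
	 * spelled-out form, which is what this commit applies. */
	unsigned     old_style_hash = 0;	/* bare form the patch removes */
	unsigned int new_style_hash = 0;	/* form the patch converts to  */

	printf("same size: %zu == %zu\n",
	       sizeof(old_style_hash), sizeof(new_style_hash));
	return 0;
}
```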