diff options
author | Stephen Rothwell <sfr@canb.auug.org.au> | 2017-02-06 16:59:32 +1100 |
---|---|---|
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2017-02-06 16:59:32 +1100 |
commit | 4acbcfd19d6d1355caba91dbb89293752456fc6a (patch) | |
tree | da4083b30164553acf2e0733695ff3f55eed6308 | |
parent | a57ede0b5d5655e7d254b5d6cdf63ef1a0eb9eae (diff) | |
parent | 39272ecab7247e592020c1fff9d0f0881f8eef05 (diff) |
Merge remote-tracking branch 'idr/idr-4.11'
54 files changed, 1516 insertions, 1947 deletions
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index cb28579e8a94..d879f3bca107 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) card->lbfqc = ns_stat_lfbqc_get(stat); id = le32_to_cpu(rsqe->buffer_handle); - skb = idr_find(&card->idr, id); + skb = idr_remove(&card->idr, id); if (!skb) { RXPRINTK(KERN_ERR - "nicstar%d: idr_find() failed!\n", card->index); + "nicstar%d: skb not found!\n", card->index); return; } - idr_remove(&card->idr, id); dma_sync_single_for_cpu(&card->pcidev->dev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 2f1772d5ee51..37000c6bb7f4 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2912,11 +2912,9 @@ out_idr_remove_vol: idr_remove(&connection->peer_devices, vnr); out_idr_remove_from_resource: for_each_connection(connection, resource) { - peer_device = idr_find(&connection->peer_devices, vnr); - if (peer_device) { - idr_remove(&connection->peer_devices, vnr); + peer_device = idr_remove(&connection->peer_devices, vnr); + if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); - } } for_each_peer_device_safe(peer_device, tmp_peer_device, device) { list_del(&peer_device->peer_devices); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index aee149bdf4c0..a301fcf46e88 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work) */ if (r->todo == ISO_RES_REALLOC && !success && !client->in_shutdown && - idr_find(&client->resource_idr, r->resource.handle)) { - idr_remove(&client->resource_idr, r->resource.handle); + idr_remove(&client->resource_idr, r->resource.handle)) { client_put(client); free = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index c02db01f6583..0218cea6be4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id) struct amdgpu_bo_list *list; mutex_lock(&fpriv->bo_list_lock); - list = idr_find(&fpriv->bo_list_handles, id); + list = idr_remove(&fpriv->bo_list_handles, id); if (list) { + /* Another user may have a reference to this list still */ mutex_lock(&list->lock); - idr_remove(&fpriv->bo_list_handles, id); mutex_unlock(&list->lock); amdgpu_bo_list_free(list); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 400c66ba4c6b..cf0500671353 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) struct amdgpu_ctx *ctx; mutex_lock(&mgr->lock); - ctx = idr_find(&mgr->ctx_handles, id); - if (ctx) { - idr_remove(&mgr->ctx_handles, id); + ctx = idr_remove(&mgr->ctx_handles, id); + if (ctx) kref_put(&ctx->refcount, amdgpu_ctx_do_release); - mutex_unlock(&mgr->lock); - return 0; - } mutex_unlock(&mgr->lock); - return -EINVAL; + return ctx ? 
0 : -EINVAL; } static int amdgpu_ctx_query(struct amdgpu_device *adev, diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c index abdd0cf710bf..fac28bd8fbee 100644 --- a/drivers/net/wireless/marvell/mwifiex/txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/txrx.c @@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, return; spin_lock_irqsave(&priv->ack_status_lock, flags); - ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id); - if (ack_skb) - idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); + ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id); spin_unlock_irqrestore(&priv->ack_status_lock, flags); if (ack_skb) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8041710b6972..18f0ec2e1f9c 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); spin_lock(&udev->commands_lock); - cmd = idr_find(&udev->commands, entry->hdr.cmd_id); - if (cmd) - idr_remove(&udev->commands, cmd->cmd_id); + cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); spin_unlock(&udev->commands_lock); if (!cmd) { diff --git a/include/linux/idr.h b/include/linux/idr.h index 3c01b89aed67..bf70b3ef0a07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -12,47 +12,29 @@ #ifndef __IDR_H__ #define __IDR_H__ -#include <linux/types.h> -#include <linux/bitops.h> -#include <linux/init.h> -#include <linux/rcupdate.h> +#include <linux/radix-tree.h> +#include <linux/gfp.h> +#include <linux/percpu.h> + +struct idr { + struct radix_tree_root idr_rt; + unsigned int idr_next; +}; /* - * Using 6 bits at each layer allows us to allocate 7 layers out of each page. - * 8 bits only gave us 3 layers out of every pair of pages, which is less - * efficient except for trees with a largest element between 192-255 inclusive. + * The IDR API does not expose the tagging functionality of the radix tree + * to users. Use tag 0 to track whether a node has free space below it. */ -#define IDR_BITS 6 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - int layer; /* distance from leaf */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - union { - /* A zero bit means "space here" */ - DECLARE_BITMAP(bitmap, IDR_SIZE); - struct rcu_head rcu_head; - }; -}; +#define IDR_FREE 0 -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - int layers; /* only valid w/o concurrent changes */ - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; - int id_free_cnt; - struct idr_layer *id_free; -}; +/* Set the IDR flag and the IDR_FREE tag */ +#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) -#define IDR_INIT(name) \ +#define IDR_INIT \ { \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDR(name) struct idr name = IDR_INIT /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -62,9 +44,9 @@ struct idr { * idr_alloc_cyclic() if it is free (otherwise the search will start from * this position). 
*/ -static inline unsigned int idr_get_cursor(struct idr *idr) +static inline unsigned int idr_get_cursor(const struct idr *idr) { - return READ_ONCE(idr->cur); + return READ_ONCE(idr->idr_next); } /** @@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr) */ static inline void idr_set_cursor(struct idr *idr, unsigned int val) { - WRITE_ONCE(idr->cur, val); + WRITE_ONCE(idr->idr_next, val); } /** @@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) * period). */ -/* - * This is what we export. - */ - -void *idr_find_slowpath(struct idr *idp, int id); void idr_preload(gfp_t gfp_mask); -int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t); +int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); +int idr_for_each(const struct idr *, int (*fn)(int id, void *p, void *data), void *data); -void *idr_get_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); -bool idr_is_empty(struct idr *idp); +void *idr_get_next(struct idr *, int *nextid); +void *idr_replace(struct idr *, void *, int id); +void idr_destroy(struct idr *); + +static inline void *idr_remove(struct idr *idr, int id) +{ + return radix_tree_delete_item(&idr->idr_rt, id, NULL); +} + +static inline void idr_init(struct idr *idr) +{ + INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); + idr->idr_next = 0; +} + +static inline bool idr_is_empty(const struct idr *idr) +{ + return radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, IDR_FREE); +} /** * idr_preload_end - end preload section started with idr_preload() @@ -137,19 +128,14 @@ static inline void idr_preload_end(void) * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. */ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(const struct idr *idr, int id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); - - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); - - return idr_find_slowpath(idr, id); + return radix_tree_lookup(&idr->idr_rt, id); } /** * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * @@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id) * after normal terminatinon @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) /** - * idr_for_each_entry - continue iteration over an idr's elements of a given type - * @idp: idr handle + * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type + * @idr: idr handle * @entry: the type * to use as cursor * @id: id entry's key * * Continue to iterate over list of given type, continuing after * the current position. 
*/ -#define idr_for_each_entry_continue(idp, entry, id) \ - for ((entry) = idr_get_next((idp), &(id)); \ +#define idr_for_each_entry_continue(idr, entry, id) \ + for ((entry) = idr_get_next((idr), &(id)); \ entry; \ - ++id, (entry) = idr_get_next((idp), &(id))) + ++id, (entry) = idr_get_next((idr), &(id))) /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. - * - * IDA_BITMAP_LONGS is calculated to be one less to accommodate - * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. */ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ -#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) +#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) struct ida_bitmap { - long nr_busy; unsigned long bitmap[IDA_BITMAP_LONGS]; }; +DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); + struct ida { - struct idr idr; - struct ida_bitmap *free_bitmap; + struct radix_tree_root ida_rt; }; -#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) +#define IDA_INIT { \ + .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +} +#define DEFINE_IDA(name) struct ida name = IDA_INIT int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); +static inline void ida_init(struct ida *ida) +{ + INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); +} + /** * ida_get_new - allocate new ID * @ida: idr handle @@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id) return ida_get_new_above(ida, 0, p_id); } -static inline bool ida_is_empty(struct ida *ida) +static inline bool ida_is_empty(const struct ida *ida) { - return idr_is_empty(&ida->idr); + return radix_tree_empty(&ida->ida_rt); } - -void __init idr_init_cache(void); - #endif /* __IDR_H__ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 52bda854593b..e505efaab31d 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -22,11 +22,13 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/preempt.h> -#include <linux/types.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/preempt.h> #include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/types.h> /* * The bottom two bits of the slot determine how the remaining bits in the @@ -94,7 +96,7 @@ struct radix_tree_node { unsigned char count; /* Total entry count */ unsigned char exceptional; /* Exceptional entry count */ struct radix_tree_node *parent; /* Used when ascending tree */ - void *private_data; /* For tree user */ + struct radix_tree_root *root; /* The tree we belong to */ union { struct list_head private_list; /* For tree user */ struct rcu_head rcu_head; /* Used when freeing node */ @@ -103,7 +105,10 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; -/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ +/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ +#define ROOT_IS_IDR (1 << __GFP_BITS_SHIFT) +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) + struct 
radix_tree_root { gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; @@ -123,7 +128,7 @@ do { \ (root)->rnode = NULL; \ } while (0) -static inline bool radix_tree_empty(struct radix_tree_root *root) +static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->rnode == NULL; } @@ -292,10 +297,10 @@ static inline int radix_tree_insert(struct radix_tree_root *root, { return __radix_tree_insert(root, index, 0, entry); } -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, +void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, struct radix_tree_node **nodep, void ***slotp); -void *radix_tree_lookup(struct radix_tree_root *, unsigned long); -void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); +void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); +void **radix_tree_lookup_slot(const struct radix_tree_root *, unsigned long); typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *); void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, @@ -309,15 +314,17 @@ void __radix_tree_delete_node(struct radix_tree_root *root, struct radix_tree_node *node, radix_tree_update_node_t update_node, void *private); +void radix_tree_iter_delete(struct radix_tree_root *, + struct radix_tree_iter *iter, void **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); void radix_tree_clear_tags(struct radix_tree_root *root, struct radix_tree_node *node, void **slot); -unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, +unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, +unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); @@ -328,19 +335,21 @@ void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *root, +void radix_tree_iter_tag_set(struct radix_tree_root *, + const struct radix_tree_iter *iter, unsigned int tag); +void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, +radix_tree_gang_lookup_tag(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, +radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, void ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); +int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) { @@ -352,10 +361,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, unsigned new_order); int 
radix_tree_join(struct radix_tree_root *, unsigned long index, unsigned new_order, void *); +void **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *, + gfp_t, int end); -#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */ -#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */ -#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */ +enum { + RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ + RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ + RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ +}; /** * radix_tree_iter_init - initialize radix tree iterator @@ -393,10 +406,44 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) * Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_lookup - look up an index in the radix tree + * @root: radix tree root + * @iter: iterator state + * @index: key to look up + * + * If @index is present in the radix tree, this function returns the slot + * containing it and updates @iter to describe the entry. If @index is not + * present, it returns NULL. + */ +static inline void **radix_tree_iter_lookup(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); +} + +/** + * radix_tree_iter_find - find a present entry + * @root: radix tree root + * @iter: iterator state + * @index: start location + * + * This function returns the slot containing the entry with the lowest index + * which is at least @index. If @index is larger than any present entry, this + * function returns NULL. The @iter is updated to describe the entry found. + */ +static inline void **radix_tree_iter_find(const struct radix_tree_root *root, + struct radix_tree_iter *iter, unsigned long index) +{ + radix_tree_iter_init(iter, index); + return radix_tree_next_chunk(root, iter, 0); +} + +/** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * diff --git a/init/main.c b/init/main.c index 5e2ac98d5e40..988a421c0c14 100644 --- a/init/main.c +++ b/init/main.c @@ -554,7 +554,7 @@ asmlinkage __visible void __init start_kernel(void) if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); - idr_init_cache(); + radix_tree_init(); /* * Allow workqueue creation and work item queueing/cancelling @@ -569,7 +569,6 @@ asmlinkage __visible void __init start_kernel(void) trace_init(); context_tracking_init(); - radix_tree_init(); /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); diff --git a/lib/idr.c b/lib/idr.c index 52d2979a05e8..21c48bb116a5 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -1,1068 +1,407 @@ -/* - * 2002-10-18 written by Jim Houston jim.houston@ccur.com - * Copyright (C) 2002 by Concurrent Computer Corporation - * Distributed under the GNU GPL license version 2. - * - * Modified by George Anzinger to reuse immediately and to use - * find bit instructions. Also removed _irq on spinlocks. - * - * Modified by Nadia Derbey to make it RCU safe. - * - * Small id to pointer translation service. 
- * - * It uses a radix tree like structure as a sparse array indexed - * by the id to obtain the pointer. The bitmap makes allocating - * a new id quick. - * - * You call it to allocate an id (an int) an associate with that id a - * pointer or what ever, we treat it as a (void *). You can pass this - * id to a user for him to pass back at a later time. You then pass - * that id to this code and it returns your pointer. - */ - -#ifndef TEST // to test in user space... -#include <linux/slab.h> -#include <linux/init.h> +#include <linux/bitmap.h> #include <linux/export.h> -#endif -#include <linux/err.h> -#include <linux/string.h> #include <linux/idr.h> +#include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/percpu.h> - -#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) -#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) - -/* Leave the possibility of an incomplete final layer */ -#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) -/* Number of id_layer structs to leave in free list */ -#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) - -static struct kmem_cache *idr_layer_cache; -static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); -static DEFINE_PER_CPU(int, idr_preload_cnt); +DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap); static DEFINE_SPINLOCK(simple_ida_lock); -/* the maximum ID which can be allocated given idr->layers */ -static int idr_max(int layers) -{ - int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); - - return (1 << bits) - 1; -} - -/* - * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is - * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and - * so on. - */ -static int idr_layer_prefix_mask(int layer) -{ - return ~idr_max(layer + 1); -} - -static struct idr_layer *get_from_free_list(struct idr *idp) -{ - struct idr_layer *p; - unsigned long flags; - - spin_lock_irqsave(&idp->lock, flags); - if ((p = idp->id_free)) { - idp->id_free = p->ary[0]; - idp->id_free_cnt--; - p->ary[0] = NULL; - } - spin_unlock_irqrestore(&idp->lock, flags); - return(p); -} - -/** - * idr_layer_alloc - allocate a new idr_layer - * @gfp_mask: allocation mask - * @layer_idr: optional idr to allocate from - * - * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch - * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch - * an idr_layer from @idr->id_free. - * - * @layer_idr is to maintain backward compatibility with the old alloc - * interface - idr_pre_get() and idr_get_new*() - and will be removed - * together with per-pool preload buffer. - */ -static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) -{ - struct idr_layer *new; - - /* this is the old path, bypass to get_from_free_list() */ - if (layer_idr) - return get_from_free_list(layer_idr); - - /* - * Try to allocate directly from kmem_cache. We want to try this - * before preload buffer; otherwise, non-preloading idr_alloc() - * users will end up taking advantage of preloading ones. As the - * following is allowed to fail for preloaded cases, suppress - * warning this time. - */ - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); - if (new) - return new; - - /* - * Try to fetch one from the per-cpu preload buffer if in process - * context. See idr_preload() for details. 
- */ - if (!in_interrupt()) { - preempt_disable(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; - } - preempt_enable(); - if (new) - return new; - } - - /* - * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so - * that memory allocation failure warning is printed as intended. - */ - return kmem_cache_zalloc(idr_layer_cache, gfp_mask); -} - -static void idr_layer_rcu_free(struct rcu_head *head) -{ - struct idr_layer *layer; - - layer = container_of(head, struct idr_layer, rcu_head); - kmem_cache_free(idr_layer_cache, layer); -} - -static inline void free_layer(struct idr *idr, struct idr_layer *p) -{ - if (idr->hint == p) - RCU_INIT_POINTER(idr->hint, NULL); - call_rcu(&p->rcu_head, idr_layer_rcu_free); -} - -/* only called when idp->lock is held */ -static void __move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - p->ary[0] = idp->id_free; - idp->id_free = p; - idp->id_free_cnt++; -} - -static void move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - unsigned long flags; - - /* - * Depends on the return element being zeroed. - */ - spin_lock_irqsave(&idp->lock, flags); - __move_to_free_list(idp, p); - spin_unlock_irqrestore(&idp->lock, flags); -} - -static void idr_mark_full(struct idr_layer **pa, int id) -{ - struct idr_layer *p = pa[0]; - int l = 0; - - __set_bit(id & IDR_MASK, p->bitmap); - /* - * If this layer is full mark the bit in the layer above to - * show that this part of the radix tree is full. This may - * complete the layer above and require walking up the radix - * tree. - */ - while (bitmap_full(p->bitmap, IDR_SIZE)) { - if (!(p = pa[++l])) - break; - id = id >> IDR_BITS; - __set_bit((id & IDR_MASK), p->bitmap); - } -} - -static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) -{ - while (idp->id_free_cnt < MAX_IDR_FREE) { - struct idr_layer *new; - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - if (new == NULL) - return (0); - move_to_free_list(idp, new); - } - return 1; -} - -/** - * sub_alloc - try to allocate an id without growing the tree depth - * @idp: idr handle - * @starting_id: id to start search at - * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer - * @gfp_mask: allocation mask for idr_layer_alloc() - * @layer_idr: optional idr passed to idr_layer_alloc() - * - * Allocate an id in range [@starting_id, INT_MAX] from @idp without - * growing its depth. Returns - * - * the allocated id >= 0 if successful, - * -EAGAIN if the tree needs to grow for allocation to succeed, - * -ENOSPC if the id space is exhausted, - * -ENOMEM if more idr_layers need to be allocated. - */ -static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, - gfp_t gfp_mask, struct idr *layer_idr) -{ - int n, m, sh; - struct idr_layer *p, *new; - int l, id, oid; - - id = *starting_id; - restart: - p = idp->top; - l = idp->layers; - pa[l--] = NULL; - while (1) { - /* - * We run around this while until we reach the leaf node... - */ - n = (id >> (IDR_BITS*l)) & IDR_MASK; - m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); - if (m == IDR_SIZE) { - /* no space available go back to previous layer. */ - l++; - oid = id; - id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; - - /* if already at the top layer, we need to grow */ - if (id > idr_max(idp->layers)) { - *starting_id = id; - return -EAGAIN; - } - p = pa[l]; - BUG_ON(!p); - - /* If we need to go up one layer, continue the - * loop; otherwise, restart from the top. 
- */ - sh = IDR_BITS * (l + 1); - if (oid >> sh == id >> sh) - continue; - else - goto restart; - } - if (m != n) { - sh = IDR_BITS*l; - id = ((id >> sh) ^ n ^ m) << sh; - } - if ((id >= MAX_IDR_BIT) || (id < 0)) - return -ENOSPC; - if (l == 0) - break; - /* - * Create the layer below if it is missing. - */ - if (!p->ary[m]) { - new = idr_layer_alloc(gfp_mask, layer_idr); - if (!new) - return -ENOMEM; - new->layer = l-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - rcu_assign_pointer(p->ary[m], new); - p->count++; - } - pa[l--] = p; - p = p->ary[m]; - } - - pa[l] = p; - return id; -} - -static int idr_get_empty_slot(struct idr *idp, int starting_id, - struct idr_layer **pa, gfp_t gfp_mask, - struct idr *layer_idr) -{ - struct idr_layer *p, *new; - int layers, v, id; - unsigned long flags; - - id = starting_id; -build_up: - p = idp->top; - layers = idp->layers; - if (unlikely(!p)) { - if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) - return -ENOMEM; - p->layer = 0; - layers = 1; - } - /* - * Add a new layer to the top of the tree if the requested - * id is larger than the currently allocated space. - */ - while (id > idr_max(layers)) { - layers++; - if (!p->count) { - /* special case: if the tree is currently empty, - * then we grow the tree by moving the top node - * upwards. - */ - p->layer++; - WARN_ON_ONCE(p->prefix); - continue; - } - if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { - /* - * The allocation failed. If we built part of - * the structure tear it down. - */ - spin_lock_irqsave(&idp->lock, flags); - for (new = p; p && p != idp->top; new = p) { - p = p->ary[0]; - new->ary[0] = NULL; - new->count = 0; - bitmap_clear(new->bitmap, 0, IDR_SIZE); - __move_to_free_list(idp, new); - } - spin_unlock_irqrestore(&idp->lock, flags); - return -ENOMEM; - } - new->ary[0] = p; - new->count = 1; - new->layer = layers-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - if (bitmap_full(p->bitmap, IDR_SIZE)) - __set_bit(0, new->bitmap); - p = new; - } - rcu_assign_pointer(idp->top, p); - idp->layers = layers; - v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); - if (v == -EAGAIN) - goto build_up; - return(v); -} - -/* - * @id and @pa are from a successful allocation from idr_get_empty_slot(). - * Install the user pointer @ptr and mark the slot full. - */ -static void idr_fill_slot(struct idr *idr, void *ptr, int id, - struct idr_layer **pa) -{ - /* update hint used for lookup, cleared from free_layer() */ - rcu_assign_pointer(idr->hint, pa[0]); - - rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); - pa[0]->count++; - idr_mark_full(pa, id); -} - - /** - * idr_preload - preload for idr_alloc() - * @gfp_mask: allocation mask to use for preloading - * - * Preload per-cpu layer buffer for idr_alloc(). Can only be used from - * process context and each idr_preload() invocation should be matched with - * idr_preload_end(). Note that preemption is disabled while preloaded. - * - * The first idr_alloc() in the preloaded section can be treated as if it - * were invoked with @gfp_mask used for preloading. This allows using more - * permissive allocation masks for idrs protected by spinlocks. - * - * For example, if idr_alloc() below fails, the failure can be treated as - * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. 
- * - * idr_preload(GFP_KERNEL); - * spin_lock(lock); - * - * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); - * - * spin_unlock(lock); - * idr_preload_end(); - * if (id < 0) - * error; - */ -void idr_preload(gfp_t gfp_mask) -{ - /* - * Consuming preload buffer from non-process context breaks preload - * allocation guarantee. Disallow usage from those contexts. - */ - WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - - preempt_disable(); - - /* - * idr_alloc() is likely to succeed w/o full idr_layer buffer and - * return value from idr_alloc() needs to be checked for failure - * anyway. Silently give up if allocation fails. The caller can - * treat failures from idr_alloc() as if idr_alloc() were called - * with @gfp_mask which should be enough. - */ - while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { - struct idr_layer *new; - - preempt_enable(); - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - preempt_disable(); - if (!new) - break; - - /* link the new one to per-cpu preload list */ - new->ary[0] = __this_cpu_read(idr_preload_head); - __this_cpu_write(idr_preload_head, new); - __this_cpu_inc(idr_preload_cnt); - } -} -EXPORT_SYMBOL(idr_preload); - -/** - * idr_alloc - allocate new idr entry - * @idr: the (initialized) idr + * idr_alloc - allocate an id + * @idr: idr handle * @ptr: pointer to be associated with the new id * @start: the minimum id (inclusive) - * @end: the maximum id (exclusive, <= 0 for max) - * @gfp_mask: memory allocation flags + * @end: the maximum id (exclusive) + * @gfp: memory allocation flags * - * Allocate an id in [start, end) and associate it with @ptr. If no ID is - * available in the specified range, returns -ENOSPC. On memory allocation - * failure, returns -ENOMEM. + * Allocates an unused ID in the range [start, end). Returns -ENOSPC + * if there are no unused IDs in that range. * * Note that @end is treated as max when <= 0. This is to always allow * using @start + N as @end as long as N is inside integer range. * - * The user is responsible for exclusively synchronizing all operations - * which may modify @idr. However, read-only accesses such as idr_find() - * or iteration can be performed under RCU read lock provided the user - * destroys @ptr in RCU-safe way after removal from idr. + * Simultaneous modifications to the @idr are not allowed and should be + * prevented by the user, usually with a lock. idr_alloc() may be called + * concurrently with read-only accesses to the @idr, such as idr_find() and + * idr_for_each_entry(). */ -int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) +int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { - int max = end > 0 ? 
end - 1 : INT_MAX; /* inclusive upper limit */ - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - int id; - - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); + void **slot; + struct radix_tree_iter iter; - /* sanity checks */ if (WARN_ON_ONCE(start < 0)) return -EINVAL; - if (unlikely(max < start)) - return -ENOSPC; + BUG_ON(radix_tree_is_internal_node(ptr)); - /* allocate id */ - id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); - if (unlikely(id < 0)) - return id; - if (unlikely(id > max)) - return -ENOSPC; + radix_tree_iter_init(&iter, start); + slot = idr_get_free(&idr->idr_rt, &iter, gfp, end); + if (IS_ERR(slot)) + return PTR_ERR(slot); - idr_fill_slot(idr, ptr, id, pa); - return id; + radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); + radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); + return iter.index; } EXPORT_SYMBOL_GPL(idr_alloc); /** * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion - * @idr: the (initialized) idr + * @idr: idr handle * @ptr: pointer to be associated with the new id * @start: the minimum id (inclusive) - * @end: the maximum id (exclusive, <= 0 for max) - * @gfp_mask: memory allocation flags + * @end: the maximum id (exclusive) + * @gfp: memory allocation flags * - * Essentially the same as idr_alloc, but prefers to allocate progressively - * higher ids if it can. If the "cur" counter wraps, then it will start again - * at the "start" end of the range and allocate one that has already been used. + * Allocates an ID larger than the last ID allocated if one is available. + * If not, it will attempt to allocate the smallest ID that is larger or + * equal to @start. */ -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, - gfp_t gfp_mask) +int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { - int id; + int id, curr = idr->idr_next; - id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask); - if (id == -ENOSPC) - id = idr_alloc(idr, ptr, start, end, gfp_mask); + if (curr < start) + curr = start; - if (likely(id >= 0)) - idr->cur = id + 1; - return id; -} -EXPORT_SYMBOL(idr_alloc_cyclic); + id = idr_alloc(idr, ptr, curr, end, gfp); + if ((id == -ENOSPC) && (curr > start)) + id = idr_alloc(idr, ptr, start, curr, gfp); -static void idr_remove_warning(int id) -{ - WARN(1, "idr_remove called for id=%d which is not allocated.\n", id); -} + if (id >= 0) + idr->idr_next = id + 1U; -static void sub_remove(struct idr *idp, int shift, int id) -{ - struct idr_layer *p = idp->top; - struct idr_layer **pa[MAX_IDR_LEVEL + 1]; - struct idr_layer ***paa = &pa[0]; - struct idr_layer *to_free; - int n; - - *paa = NULL; - *++paa = &idp->top; - - while ((shift > 0) && p) { - n = (id >> shift) & IDR_MASK; - __clear_bit(n, p->bitmap); - *++paa = &p->ary[n]; - p = p->ary[n]; - shift -= IDR_BITS; - } - n = id & IDR_MASK; - if (likely(p != NULL && test_bit(n, p->bitmap))) { - __clear_bit(n, p->bitmap); - RCU_INIT_POINTER(p->ary[n], NULL); - to_free = NULL; - while(*paa && ! 
--((**paa)->count)){ - if (to_free) - free_layer(idp, to_free); - to_free = **paa; - **paa-- = NULL; - } - if (!*paa) - idp->layers = 0; - if (to_free) - free_layer(idp, to_free); - } else - idr_remove_warning(id); -} - -/** - * idr_remove - remove the given id and free its slot - * @idp: idr handle - * @id: unique key - */ -void idr_remove(struct idr *idp, int id) -{ - struct idr_layer *p; - struct idr_layer *to_free; - - if (id < 0) - return; - - if (id > idr_max(idp->layers)) { - idr_remove_warning(id); - return; - } - - sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); - if (idp->top && idp->top->count == 1 && (idp->layers > 1) && - idp->top->ary[0]) { - /* - * Single child at leftmost slot: we can shrink the tree. - * This level is not needed anymore since when layers are - * inserted, they are inserted at the top of the existing - * tree. - */ - to_free = idp->top; - p = idp->top->ary[0]; - rcu_assign_pointer(idp->top, p); - --idp->layers; - to_free->count = 0; - bitmap_clear(to_free->bitmap, 0, IDR_SIZE); - free_layer(idp, to_free); - } -} -EXPORT_SYMBOL(idr_remove); - -static void __idr_remove_all(struct idr *idp) -{ - int n, id, max; - int bt_mask; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - *paa = idp->top; - RCU_INIT_POINTER(idp->top, NULL); - max = idr_max(idp->layers); - - id = 0; - while (id >= 0 && id <= max) { - p = *paa; - while (n > IDR_BITS && p) { - n -= IDR_BITS; - p = p->ary[(id >> n) & IDR_MASK]; - *++paa = p; - } - - bt_mask = id; - id += 1 << n; - /* Get the highest bit that the above add changed from 0->1. */ - while (n < fls(id ^ bt_mask)) { - if (*paa) - free_layer(idp, *paa); - n += IDR_BITS; - --paa; - } - } - idp->layers = 0; -} - -/** - * idr_destroy - release all cached layers within an idr tree - * @idp: idr handle - * - * Free all id mappings and all idp_layers. After this function, @idp is - * completely unused and can be freed / recycled. The caller is - * responsible for ensuring that no one else accesses @idp during or after - * idr_destroy(). - * - * A typical clean-up sequence for objects stored in an idr tree will use - * idr_for_each() to free all objects, if necessary, then idr_destroy() to - * free up the id mappings and cached idr_layers. - */ -void idr_destroy(struct idr *idp) -{ - __idr_remove_all(idp); - - while (idp->id_free_cnt) { - struct idr_layer *p = get_from_free_list(idp); - kmem_cache_free(idr_layer_cache, p); - } -} -EXPORT_SYMBOL(idr_destroy); - -void *idr_find_slowpath(struct idr *idp, int id) -{ - int n; - struct idr_layer *p; - - if (id < 0) - return NULL; - - p = rcu_dereference_raw(idp->top); - if (!p) - return NULL; - n = (p->layer+1) * IDR_BITS; - - if (id > idr_max(p->layer + 1)) - return NULL; - BUG_ON(n == 0); - - while (n > 0 && p) { - n -= IDR_BITS; - BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - } - return((void *)p); + return id; } -EXPORT_SYMBOL(idr_find_slowpath); +EXPORT_SYMBOL(idr_alloc_cyclic); /** * idr_for_each - iterate through all stored pointers - * @idp: idr handle + * @idr: idr handle * @fn: function to be called for each pointer - * @data: data passed back to callback function + * @data: data passed to callback function * - * Iterate over the pointers registered with the given idr. The - * callback function will be called for each pointer currently - * registered, passing the id, the pointer and the data pointer passed - * to this function. 
It is not safe to modify the idr tree while in - * the callback, so functions such as idr_get_new and idr_remove are - * not allowed. + * The callback function will be called for each entry in @idr, passing + * the id, the pointer and the data pointer passed to this function. * - * We check the return of @fn each time. If it returns anything other - * than %0, we break out and return that value. + * If @fn returns anything other than %0, the iteration stops and that + * value is returned from this function. * - * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). + * idr_for_each() can be called concurrently with idr_alloc() and + * idr_remove() if protected by RCU. Newly added entries may not be + * seen and deleted entries may be seen, but adding and removing entries + * will not cause other entries to be skipped, nor spurious ones to be seen. */ -int idr_for_each(struct idr *idp, - int (*fn)(int id, void *p, void *data), void *data) +int idr_for_each(const struct idr *idr, + int (*fn)(int id, void *p, void *data), void *data) { - int n, id, max, error = 0; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - *paa = rcu_dereference_raw(idp->top); - max = idr_max(idp->layers); + struct radix_tree_iter iter; + void **slot; - id = 0; - while (id >= 0 && id <= max) { - p = *paa; - while (n > 0 && p) { - n -= IDR_BITS; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - *++paa = p; - } - - if (p) { - error = fn(id, (void *)p, data); - if (error) - break; - } - - id += 1 << n; - while (n < fls(id)) { - n += IDR_BITS; - --paa; - } + radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { + int ret = fn(iter.index, *slot, data); + if (ret) + return ret; } - return error; + return 0; } EXPORT_SYMBOL(idr_for_each); /** - * idr_get_next - lookup next object of id to given id. - * @idp: idr handle - * @nextidp: pointer to lookup key - * - * Returns pointer to registered object with id, which is next number to - * given id. After being looked up, *@nextidp will be updated for the next - * iteration. - * - * This function can be called under rcu_read_lock(), given that the leaf - * pointers lifetimes are correctly managed. + * idr_get_next - Find next populated entry + * @idr: idr handle + * @nextid: Pointer to lowest possible ID to return + * + * Returns the next populated entry in the tree with an ID greater than + * or equal to the value pointed to by @nextid. On exit, @nextid is updated + * to the ID of the found value. To use in a loop, the value pointed to by + * nextid must be incremented by the user. */ -void *idr_get_next(struct idr *idp, int *nextidp) +void *idr_get_next(struct idr *idr, int *nextid) { - struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - int id = *nextidp; - int n, max; + struct radix_tree_iter iter; + void **slot; - /* find first ent */ - p = *paa = rcu_dereference_raw(idp->top); - if (!p) + slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid); + if (!slot) return NULL; - n = (p->layer + 1) * IDR_BITS; - max = idr_max(p->layer + 1); - - while (id >= 0 && id <= max) { - p = *paa; - while (n > 0 && p) { - n -= IDR_BITS; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - *++paa = p; - } - - if (p) { - *nextidp = id; - return p; - } - /* - * Proceed to the next layer at the current level. 
Unlike - * idr_for_each(), @id isn't guaranteed to be aligned to - * layer boundary at this point and adding 1 << n may - * incorrectly skip IDs. Make sure we jump to the - * beginning of the next layer using round_up(). - */ - id = round_up(id + 1, 1 << n); - while (n < fls(id)) { - n += IDR_BITS; - --paa; - } - } - return NULL; + *nextid = iter.index; + return *slot; } EXPORT_SYMBOL(idr_get_next); - /** * idr_replace - replace pointer for given id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: lookup key + * @idr: idr handle + * @ptr: New pointer to associate with the ID + * @id: Lookup key * - * Replace the pointer registered with an id and return the old value. - * A %-ENOENT return indicates that @id was not found. - * A %-EINVAL return indicates that @id was not within valid constraints. + * Replace the pointer registered with an ID and return the old value. + * This function can be called under the RCU read lock concurrently with + * idr_alloc() and idr_remove() (as long as the ID being removed is not + * the one being replaced!). * - * The caller must serialize with writers. + * Returns: 0 on success. %-ENOENT indicates that @id was not found. + * %-EINVAL indicates that @id or @ptr were not valid. */ -void *idr_replace(struct idr *idp, void *ptr, int id) +void *idr_replace(struct idr *idr, void *ptr, int id) { - int n; - struct idr_layer *p, *old_p; + struct radix_tree_node *node; + void **slot = NULL; + void *entry; if (id < 0) return ERR_PTR(-EINVAL); + if (radix_tree_is_internal_node(ptr)) + return ERR_PTR(-EINVAL); - p = idp->top; - if (!p) - return ERR_PTR(-ENOENT); - - if (id > idr_max(p->layer + 1)) - return ERR_PTR(-ENOENT); - - n = p->layer * IDR_BITS; - while ((n > 0) && p) { - p = p->ary[(id >> n) & IDR_MASK]; - n -= IDR_BITS; - } - - n = id & IDR_MASK; - if (unlikely(p == NULL || !test_bit(n, p->bitmap))) + entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); + if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) return ERR_PTR(-ENOENT); - old_p = p->ary[n]; - rcu_assign_pointer(p->ary[n], ptr); + __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL); - return old_p; + return entry; } EXPORT_SYMBOL(idr_replace); -void __init idr_init_cache(void) -{ - idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); -} - -/** - * idr_init - initialize idr handle - * @idp: idr handle - * - * This function is use to set up the handle (@idp) that you will pass - * to the rest of the functions. - */ -void idr_init(struct idr *idp) -{ - memset(idp, 0, sizeof(struct idr)); - spin_lock_init(&idp->lock); -} -EXPORT_SYMBOL(idr_init); - -static int idr_has_entry(int id, void *p, void *data) -{ - return 1; -} - -bool idr_is_empty(struct idr *idp) -{ - return !idr_for_each(idp, idr_has_entry, NULL); -} -EXPORT_SYMBOL(idr_is_empty); - /** * DOC: IDA description - * IDA - IDR based ID allocator * - * This is id allocator without id -> pointer translation. Memory - * usage is much lower than full blown idr because each id only - * occupies a bit. ida uses a custom leaf node which contains - * IDA_BITMAP_BITS slots. - * - * 2007-04-25 written by Tejun Heo <htejun@gmail.com> + * The IDA is an ID allocator which does not provide the ability to + * associate an ID with a pointer. As such, it only needs to store one + * bit per ID, and so is more space efficient than an IDR. 
To use an IDA, + * define it using DEFINE_IDA() (or embed a &struct ida in a data structure, + * then initialise it using ida_init()). To allocate a new ID, call + * ida_simple_get(). To free an ID, call ida_simple_remove(). + * + * If you have more complex locking requirements, use a loop around + * ida_pre_get() and ida_get_new() to allocate a new ID. Then use + * ida_remove() to free an ID. You must make sure that ida_get_new() and + * ida_remove() cannot be called at the same time as each other for the + * same IDA. + * + * You can also use ida_get_new_above() if you need an ID to be allocated + * above a particular number. ida_destroy() can be used to dispose of an + * IDA without needing to free the individual IDs in it. You can use + * ida_is_empty() to find out whether the IDA has any IDs currently allocated. + * + * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward + * limitation, it should be quite straightforward to raise the maximum. */ -static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) -{ - unsigned long flags; - - if (!ida->free_bitmap) { - spin_lock_irqsave(&ida->idr.lock, flags); - if (!ida->free_bitmap) { - ida->free_bitmap = bitmap; - bitmap = NULL; - } - spin_unlock_irqrestore(&ida->idr.lock, flags); - } - - kfree(bitmap); -} - -/** - * ida_pre_get - reserve resources for ida allocation - * @ida: ida handle - * @gfp_mask: memory allocation flag - * - * This function should be called prior to locking and calling the - * following function. It preallocates enough memory to satisfy the - * worst possible allocation. - * - * If the system is REALLY out of memory this function returns %0, - * otherwise %1. +/* + * Developer's notes: + * + * The IDA uses the functionality provided by the IDR & radix tree to store + * bitmaps in each entry. The IDR_FREE tag means there is at least one bit + * free, unlike the IDR where it means at least one entry is free. + * + * I considered telling the radix tree that each slot is an order-10 node + * and storing the bit numbers in the radix tree, but the radix tree can't + * allow a single multiorder entry at index 0, which would significantly + * increase memory consumption for the IDA. So instead we divide the index + * by the number of bits in the leaf bitmap before doing a radix tree lookup. + * + * As an optimisation, if there are only a few low bits set in any given + * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional + * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits + * directly in the entry. By being really tricksy, we could store + * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising + * for 0-3 allocated IDs. + * + * We allow the radix tree 'exceptional' count to get out of date. Nothing + * in the IDA nor the radix tree code checks it. If it becomes important + * to maintain an accurate exceptional count, switch the rcu_assign_pointer() + * calls to radix_tree_iter_replace() which will correct the exceptional + * count. + * + * The IDA always requires a lock to alloc/free. If we add a 'test_bit' + * equivalent, it will still need locking. Going to RCU lookup would require + * using RCU to free bitmaps, and that's not trivial without embedding an + * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte + * bitmap, which is excessive. 
*/ -int ida_pre_get(struct ida *ida, gfp_t gfp_mask) -{ - /* allocate idr_layers */ - if (!__idr_pre_get(&ida->idr, gfp_mask)) - return 0; - /* allocate free_bitmap */ - if (!ida->free_bitmap) { - struct ida_bitmap *bitmap; - - bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); - if (!bitmap) - return 0; - - free_bitmap(ida, bitmap); - } - - return 1; -} -EXPORT_SYMBOL(ida_pre_get); +#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS) /** * ida_get_new_above - allocate new ID above or equal to a start id - * @ida: ida handle - * @starting_id: id to start search at - * @p_id: pointer to the allocated handle + * @ida: ida handle + * @start: id to start search at + * @id: pointer to the allocated handle * - * Allocate new ID above or equal to @starting_id. It should be called - * with any required locks. + * Allocate new ID above or equal to @start. It should be called + * with any required locks to ensure that concurrent calls to + * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed. + * Consider using ida_simple_get() if you do not have complex locking + * requirements. * * If memory is required, it will return %-EAGAIN, you should unlock * and go back to the ida_pre_get() call. If the ida is full, it will - * return %-ENOSPC. - * - * Note that callers must ensure that concurrent access to @ida is not possible. - * See ida_simple_get() for a varaint which takes care of locking. + * return %-ENOSPC. On success, it will return 0. * - * @p_id returns a value in the range @starting_id ... %0x7fffffff. + * @id returns a value in the range @start ... %0x7fffffff. */ -int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) +int ida_get_new_above(struct ida *ida, int start, int *id) { - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; + struct radix_tree_root *root = &ida->ida_rt; + void **slot; + struct radix_tree_iter iter; struct ida_bitmap *bitmap; - unsigned long flags; - int idr_id = starting_id / IDA_BITMAP_BITS; - int offset = starting_id % IDA_BITMAP_BITS; - int t, id; - - restart: - /* get vacant slot */ - t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); - if (t < 0) - return t == -ENOMEM ? 
-EAGAIN : t; - - if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) - return -ENOSPC; - - if (t != idr_id) - offset = 0; - idr_id = t; - - /* if bitmap isn't there, create a new one */ - bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; - if (!bitmap) { - spin_lock_irqsave(&ida->idr.lock, flags); - bitmap = ida->free_bitmap; - ida->free_bitmap = NULL; - spin_unlock_irqrestore(&ida->idr.lock, flags); - - if (!bitmap) - return -EAGAIN; - - memset(bitmap, 0, sizeof(struct ida_bitmap)); - rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], - (void *)bitmap); - pa[0]->count++; - } - - /* lookup for empty slot */ - t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); - if (t == IDA_BITMAP_BITS) { - /* no empty slot after offset, continue to the next chunk */ - idr_id++; - offset = 0; - goto restart; - } - - id = idr_id * IDA_BITMAP_BITS + t; - if (id >= MAX_IDR_BIT) - return -ENOSPC; + unsigned long index; + unsigned bit, ebit; + int new; + + index = start / IDA_BITMAP_BITS; + bit = start % IDA_BITMAP_BITS; + ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; + + slot = radix_tree_iter_init(&iter, index); + for (;;) { + if (slot) + slot = radix_tree_next_slot(slot, &iter, + RADIX_TREE_ITER_TAGGED); + if (!slot) { + slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX); + if (IS_ERR(slot)) { + if (slot == ERR_PTR(-ENOMEM)) + return -EAGAIN; + return PTR_ERR(slot); + } + } + if (iter.index > index) { + bit = 0; + ebit = RADIX_TREE_EXCEPTIONAL_SHIFT; + } + new = iter.index * IDA_BITMAP_BITS; + bitmap = *slot; + if (radix_tree_exception(bitmap)) { + unsigned long tmp = (unsigned long)bitmap; + ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit); + if (ebit < BITS_PER_LONG) { + tmp |= 1UL << ebit; + rcu_assign_pointer(*slot, (void *)tmp); + *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT; + return 0; + } + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAIN; + memset(bitmap, 0, sizeof(*bitmap)); + bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT; + rcu_assign_pointer(*slot, bitmap); + } - __set_bit(t, bitmap->bitmap); - if (++bitmap->nr_busy == IDA_BITMAP_BITS) - idr_mark_full(pa, idr_id); + if (bitmap) { + bit = find_next_zero_bit(bitmap->bitmap, + IDA_BITMAP_BITS, bit); + new += bit; + if (new < 0) + return -ENOSPC; + if (bit == IDA_BITMAP_BITS) + continue; - *p_id = id; + __set_bit(bit, bitmap->bitmap); + if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) + radix_tree_iter_tag_clear(root, &iter, + IDR_FREE); + } else { + new += bit; + if (new < 0) + return -ENOSPC; + if (ebit < BITS_PER_LONG) { + bitmap = (void *)((1UL << ebit) | + RADIX_TREE_EXCEPTIONAL_ENTRY); + radix_tree_iter_replace(root, &iter, slot, + bitmap); + *id = new; + return 0; + } + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAIN; + memset(bitmap, 0, sizeof(*bitmap)); + __set_bit(bit, bitmap->bitmap); + radix_tree_iter_replace(root, &iter, slot, bitmap); + } - /* Each leaf node can handle nearly a thousand slots and the - * whole idea of ida is to have small memory foot print. - * Throw away extra resources one by one after each successful - * allocation. 
- */ - if (ida->idr.id_free_cnt || ida->free_bitmap) { - struct idr_layer *p = get_from_free_list(&ida->idr); - if (p) - kmem_cache_free(idr_layer_cache, p); + *id = new; + return 0; } - - return 0; } EXPORT_SYMBOL(ida_get_new_above); /** - * ida_remove - remove the given ID - * @ida: ida handle - * @id: ID to free + * ida_remove - Free the given ID + * @ida: ida handle + * @id: ID to free + * + * This function should not be called at the same time as ida_get_new_above(). */ void ida_remove(struct ida *ida, int id) { - struct idr_layer *p = ida->idr.top; - int shift = (ida->idr.layers - 1) * IDR_BITS; - int idr_id = id / IDA_BITMAP_BITS; - int offset = id % IDA_BITMAP_BITS; - int n; + unsigned long index = id / IDA_BITMAP_BITS; + unsigned offset = id % IDA_BITMAP_BITS; struct ida_bitmap *bitmap; + unsigned long *btmp; + struct radix_tree_iter iter; + void **slot; - if (idr_id > idr_max(ida->idr.layers)) + slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index); + if (!slot) goto err; - /* clear full bits while looking up the leaf idr_layer */ - while ((shift > 0) && p) { - n = (idr_id >> shift) & IDR_MASK; - __clear_bit(n, p->bitmap); - p = p->ary[n]; - shift -= IDR_BITS; + bitmap = *slot; + if (radix_tree_exception(bitmap)) { + btmp = (unsigned long *)slot; + offset += RADIX_TREE_EXCEPTIONAL_SHIFT; + if (offset >= BITS_PER_LONG) + goto err; + } else { + btmp = bitmap->bitmap; } - - if (p == NULL) - goto err; - - n = idr_id & IDR_MASK; - __clear_bit(n, p->bitmap); - - bitmap = (void *)p->ary[n]; - if (!bitmap || !test_bit(offset, bitmap->bitmap)) + if (!test_bit(offset, btmp)) goto err; - /* update bitmap and remove it if empty */ - __clear_bit(offset, bitmap->bitmap); - if (--bitmap->nr_busy == 0) { - __set_bit(n, p->bitmap); /* to please idr_remove() */ - idr_remove(&ida->idr, idr_id); - free_bitmap(ida, bitmap); + __clear_bit(offset, btmp); + radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE); + if (radix_tree_exception(bitmap)) { + if (*slot == (void *)RADIX_TREE_EXCEPTIONAL_ENTRY) + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) { + kfree(bitmap); + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); } - return; - err: WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); } EXPORT_SYMBOL(ida_remove); /** - * ida_destroy - release all cached layers within an ida tree - * @ida: ida handle + * ida_destroy - Free the contents of an ida + * @ida: ida handle + * + * Calling this function releases all resources associated with an IDA. When + * this call returns, the IDA is empty and can be reused or freed. The caller + * should not allow ida_remove() or ida_get_new_above() to be called at the + * same time. */ void ida_destroy(struct ida *ida) { - idr_destroy(&ida->idr); - kfree(ida->free_bitmap); + struct radix_tree_iter iter; + void **slot; + + radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { + struct ida_bitmap *bitmap = *slot; + if (!radix_tree_exception(bitmap)) + kfree(bitmap); + radix_tree_iter_delete(&ida->ida_rt, &iter, slot); + } } EXPORT_SYMBOL(ida_destroy); @@ -1141,18 +480,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id) spin_unlock_irqrestore(&simple_ida_lock, flags); } EXPORT_SYMBOL(ida_simple_remove); - -/** - * ida_init - initialize ida handle - * @ida: ida handle - * - * This function is use to set up the handle (@ida) that you will pass - * to the rest of the functions. 
- */ -void ida_init(struct ida *ida) -{ - memset(ida, 0, sizeof(struct ida)); - idr_init(&ida->idr); - -} -EXPORT_SYMBOL(ida_init); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 72fab4999c00..9dc093d5ef39 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -22,20 +22,21 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/bitmap.h> +#include <linux/bitops.h> #include <linux/cpu.h> #include <linux/errno.h> +#include <linux/export.h> +#include <linux/idr.h> #include <linux/init.h> #include <linux/kernel.h> -#include <linux/export.h> -#include <linux/radix-tree.h> +#include <linux/kmemleak.h> #include <linux/percpu.h> +#include <linux/preempt.h> /* in_interrupt() */ +#include <linux/radix-tree.h> +#include <linux/rcupdate.h> #include <linux/slab.h> -#include <linux/kmemleak.h> -#include <linux/cpu.h> #include <linux/string.h> -#include <linux/bitops.h> -#include <linux/rcupdate.h> -#include <linux/preempt.h> /* in_interrupt() */ /* Number of nodes in fully populated tree of given height */ @@ -60,11 +61,28 @@ static struct kmem_cache *radix_tree_node_cachep; #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) /* + * The IDR does not have to be as high as the radix tree since it uses + * signed integers, not unsigned longs. + */ +#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) +#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) + +/* + * The IDA is even shorter since it uses a bitmap at the last level. + */ +#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS)) +#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1) + +/* * Per-cpu pool of preloaded nodes */ struct radix_tree_preload { unsigned nr; - /* nodes->private_data points to next preallocated node */ + /* nodes->parent points to next preallocated node */ struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; @@ -83,26 +101,28 @@ static inline void *node_to_entry(void *ptr) #ifdef CONFIG_RADIX_TREE_MULTIORDER /* Sibling slots point directly to another slot in the same node */ -static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) +static inline +bool is_sibling_entry(const struct radix_tree_node *parent, void *node) { void **ptr = node; return (parent->slots <= ptr) && (ptr < parent->slots + RADIX_TREE_MAP_SIZE); } #else -static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) +static inline +bool is_sibling_entry(const struct radix_tree_node *parent, void *node) { return false; } #endif -static inline unsigned long get_slot_offset(struct radix_tree_node *parent, - void **slot) +static inline +unsigned long get_slot_offset(const struct radix_tree_node *parent, void **slot) { return slot - parent->slots; } -static unsigned int radix_tree_descend(struct radix_tree_node *parent, +static unsigned int radix_tree_descend(const struct radix_tree_node *parent, struct radix_tree_node **nodep, unsigned long index) { unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; @@ -122,7 +142,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent, return offset; } -static inline gfp_t root_gfp_mask(struct radix_tree_root *root) +static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) { return root->gfp_mask & __GFP_BITS_MASK; } @@ -139,42 +159,48 @@ static inline void 
tag_clear(struct radix_tree_node *node, unsigned int tag, __clear_bit(offset, node->tags[tag]); } -static inline int tag_get(struct radix_tree_node *node, unsigned int tag, +static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, int offset) { return test_bit(offset, node->tags[tag]); } -static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) +static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); + root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) { - root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); + root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear_all(struct radix_tree_root *root) { - root->gfp_mask &= __GFP_BITS_MASK; + root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1; +} + +static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) +{ + return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT)); } -static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) +static inline unsigned root_tags_get(const struct radix_tree_root *root) { - return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); + return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT; } -static inline unsigned root_tags_get(struct radix_tree_root *root) +static inline bool is_idr(const struct radix_tree_root *root) { - return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; + return (__force unsigned)root->gfp_mask & ROOT_IS_IDR; } /* * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. */ -static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) +static inline int any_tag_set(const struct radix_tree_node *node, + unsigned int tag) { unsigned idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { @@ -184,6 +210,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) return 0; } +static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) +{ + bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); +} + /** * radix_tree_find_next_bit - find the next set bit in a memory region * @@ -232,11 +263,18 @@ static inline unsigned long shift_maxindex(unsigned int shift) return (RADIX_TREE_MAP_SIZE << shift) - 1; } -static inline unsigned long node_maxindex(struct radix_tree_node *node) +static inline unsigned long node_maxindex(const struct radix_tree_node *node) { return shift_maxindex(node->shift); } +static unsigned long next_index(unsigned long index, + const struct radix_tree_node *node, + unsigned long offset) +{ + return (index & ~node_maxindex(node)) + (offset << node->shift); +} + #ifndef __KERNEL__ static void dump_node(struct radix_tree_node *node, unsigned long index) { @@ -275,11 +313,59 @@ static void radix_tree_dump(struct radix_tree_root *root) { pr_debug("radix root: %p rnode %p tags %x\n", root, root->rnode, - root->gfp_mask >> __GFP_BITS_SHIFT); + root->gfp_mask >> ROOT_TAG_SHIFT); if (!radix_tree_is_internal_node(root->rnode)) return; dump_node(entry_to_node(root->rnode), 0); } + +static void dump_ida_node(void *entry, unsigned long index) +{ + unsigned long i; + + if (!entry) + return; + + if (radix_tree_is_internal_node(entry)) { + struct radix_tree_node *node = entry_to_node(entry); + + pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d 
count %d\n", + node, node->offset, index * IDA_BITMAP_BITS, + ((index | node_maxindex(node)) + 1) * + IDA_BITMAP_BITS - 1, + node->parent, node->tags[0][0], node->shift, + node->count); + for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) + dump_ida_node(node->slots[i], + index | (i << node->shift)); + } else if (radix_tree_exceptional_entry(entry)) { + pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n", + entry, (int)(index & RADIX_TREE_MAP_MASK), + index * IDA_BITMAP_BITS, + index * IDA_BITMAP_BITS + BITS_PER_LONG - + RADIX_TREE_EXCEPTIONAL_SHIFT, + (unsigned long)entry >> + RADIX_TREE_EXCEPTIONAL_SHIFT); + } else { + struct ida_bitmap *bitmap = entry; + + pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap, + (int)(index & RADIX_TREE_MAP_MASK), + index * IDA_BITMAP_BITS, + (index + 1) * IDA_BITMAP_BITS - 1); + for (i = 0; i < IDA_BITMAP_LONGS; i++) + pr_cont(" %lx", bitmap->bitmap[i]); + pr_cont("\n"); + } +} + +static void ida_dump(struct ida *ida) +{ + struct radix_tree_root *root = &ida->ida_rt; + pr_debug("ida: %p node %p free %d\n", ida, root->rnode, + root->gfp_mask >> ROOT_TAG_SHIFT); + dump_ida_node(root->rnode, 0); +} #endif /* @@ -287,13 +373,12 @@ static void radix_tree_dump(struct radix_tree_root *root) * that the caller has pinned this thread of control to the current CPU. */ static struct radix_tree_node * -radix_tree_node_alloc(struct radix_tree_root *root, - struct radix_tree_node *parent, +radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, + struct radix_tree_root *root, unsigned int shift, unsigned int offset, unsigned int count, unsigned int exceptional) { struct radix_tree_node *ret = NULL; - gfp_t gfp_mask = root_gfp_mask(root); /* * Preload code isn't irq safe and it doesn't make sense to use @@ -321,8 +406,7 @@ radix_tree_node_alloc(struct radix_tree_root *root, rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; - rtp->nodes = ret->private_data; - ret->private_data = NULL; + rtp->nodes = ret->parent; rtp->nr--; } /* @@ -336,11 +420,12 @@ radix_tree_node_alloc(struct radix_tree_root *root, out: BUG_ON(radix_tree_is_internal_node(ret)); if (ret) { - ret->parent = parent; ret->shift = shift; ret->offset = offset; ret->count = count; ret->exceptional = exceptional; + ret->parent = parent; + ret->root = root; } return ret; } @@ -399,7 +484,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { - node->private_data = rtp->nodes; + node->parent = rtp->nodes; rtp->nodes = node; rtp->nr++; } else { @@ -510,7 +595,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } -static unsigned radix_tree_load_root(struct radix_tree_root *root, +static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { struct radix_tree_node *node = rcu_dereference_raw(root->rnode); @@ -530,7 +615,7 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root, /* * Extend a radix tree so it can store key @index. 
*/ -static int radix_tree_extend(struct radix_tree_root *root, +static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, unsigned long index, unsigned int shift) { struct radix_tree_node *slot; @@ -543,19 +628,27 @@ static int radix_tree_extend(struct radix_tree_root *root, maxshift += RADIX_TREE_MAP_SHIFT; slot = root->rnode; - if (!slot) + if (!slot && (!is_idr(root) || root_tag_get(root, IDR_FREE))) goto out; do { - struct radix_tree_node *node = radix_tree_node_alloc(root, - NULL, shift, 0, 1, 0); + struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, + root, shift, 0, 1, 0); if (!node) return -ENOMEM; - /* Propagate the aggregated tag info into the new root */ - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { - if (root_tag_get(root, tag)) - tag_set(node, tag, 0); + if (is_idr(root)) { + all_tag_set(node, IDR_FREE); + if (!root_tag_get(root, IDR_FREE)) { + tag_clear(node, IDR_FREE, 0); + root_tag_set(root, IDR_FREE); + } + } else { + /* Propagate the aggregated tag info to the new child */ + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { + if (root_tag_get(root, tag)) + tag_set(node, tag, 0); + } } BUG_ON(shift > BITS_PER_LONG); @@ -578,10 +671,12 @@ out: * radix_tree_shrink - shrink radix tree to minimum height * @root radix tree root */ -static inline void radix_tree_shrink(struct radix_tree_root *root, +static inline bool radix_tree_shrink(struct radix_tree_root *root, radix_tree_update_node_t update_node, void *private) { + bool shrunk = false; + for (;;) { struct radix_tree_node *node = root->rnode; struct radix_tree_node *child; @@ -614,6 +709,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root, * one (root->rnode) as far as dependent read barriers go. */ root->rnode = child; + if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) + root_tag_clear(root, IDR_FREE); /* * We have a dilemma here. The node's slot[0] must not be @@ -642,20 +739,26 @@ static inline void radix_tree_shrink(struct radix_tree_root *root, WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); + shrunk = true; } + + return shrunk; } -static void delete_node(struct radix_tree_root *root, +static bool delete_node(struct radix_tree_root *root, struct radix_tree_node *node, radix_tree_update_node_t update_node, void *private) { + bool deleted = false; + do { struct radix_tree_node *parent; if (node->count) { if (node == entry_to_node(root->rnode)) - radix_tree_shrink(root, update_node, private); - return; + deleted |= radix_tree_shrink(root, update_node, + private); + return deleted; } parent = node->parent; @@ -663,15 +766,23 @@ static void delete_node(struct radix_tree_root *root, parent->slots[node->offset] = NULL; parent->count--; } else { - root_tag_clear_all(root); + /* + * Shouldn't the tags already have all been cleared + * by the caller? 
+ */ + if (!is_idr(root)) + root_tag_clear_all(root); root->rnode = NULL; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); + deleted = true; node = parent; } while (node); + + return deleted; } /** @@ -700,6 +811,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, unsigned long maxindex; unsigned int shift, offset = 0; unsigned long max = index | ((1UL << order) - 1); + gfp_t gfp = root_gfp_mask(root); shift = radix_tree_load_root(root, &child, &maxindex); @@ -707,7 +819,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, if (order > 0 && max == ((1UL << order) - 1)) max++; if (max > maxindex) { - int error = radix_tree_extend(root, max, shift); + int error = radix_tree_extend(root, gfp, max, shift); if (error < 0) return error; shift = error; @@ -718,7 +830,7 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ - child = radix_tree_node_alloc(root, node, shift, + child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return -ENOMEM; @@ -741,7 +853,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, return 0; } -#ifdef CONFIG_RADIX_TREE_MULTIORDER /* * Free any nodes below this node. The tree is presumed to not need * shrinking, and any user data in the tree is presumed to not need a @@ -777,6 +888,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node) } } +#ifdef CONFIG_RADIX_TREE_MULTIORDER static inline int insert_entries(struct radix_tree_node *node, void **slot, void *item, unsigned order, bool replace) { @@ -908,8 +1020,9 @@ EXPORT_SYMBOL(__radix_tree_insert); * allocated and @root->rnode is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. */ -void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp) +void *__radix_tree_lookup(const struct radix_tree_root *root, + unsigned long index, struct radix_tree_node **nodep, + void ***slotp) { struct radix_tree_node *node, *parent; unsigned long maxindex; @@ -952,7 +1065,8 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, * exclusive from other writers. Any dereference of the slot must be done * using radix_tree_deref_slot. */ -void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) +void **radix_tree_lookup_slot(const struct radix_tree_root *root, + unsigned long index) { void **slot; @@ -974,75 +1088,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot); * them safely). No RCU barriers are required to access or modify the * returned item, however. 
*/ -void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) +void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) { return __radix_tree_lookup(root, index, NULL, NULL); } EXPORT_SYMBOL(radix_tree_lookup); -static inline int slot_count(struct radix_tree_node *node, - void **slot) +static inline void replace_sibling_entries(struct radix_tree_node *node, + void **slot, int count, int exceptional) { - int n = 1; #ifdef CONFIG_RADIX_TREE_MULTIORDER void *ptr = node_to_entry(slot); - unsigned offset = get_slot_offset(node, slot); - int i; + unsigned offset = get_slot_offset(node, slot) + 1; - for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { - if (node->slots[offset + i] != ptr) + while (offset < RADIX_TREE_MAP_SIZE) { + if (node->slots[offset] != ptr) break; - n++; + if (count < 0) { + node->slots[offset] = NULL; + node->count--; + } + node->exceptional += exceptional; + offset++; } #endif - return n; } -static void replace_slot(struct radix_tree_root *root, - struct radix_tree_node *node, - void **slot, void *item, - bool warn_typeswitch) +static void replace_slot(void **slot, void *item, struct radix_tree_node *node, + int count, int exceptional) { - void *old = rcu_dereference_raw(*slot); - int count, exceptional; - - WARN_ON_ONCE(radix_tree_is_internal_node(item)); - - count = !!item - !!old; - exceptional = !!radix_tree_exceptional_entry(item) - - !!radix_tree_exceptional_entry(old); - - WARN_ON_ONCE(warn_typeswitch && (count || exceptional)); + if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) + return; - if (node) { + if (node && (count || exceptional)) { node->count += count; - if (exceptional) { - exceptional *= slot_count(node, slot); - node->exceptional += exceptional; - } + node->exceptional += exceptional; + replace_sibling_entries(node, slot, count, exceptional); } rcu_assign_pointer(*slot, item); } -static inline void delete_sibling_entries(struct radix_tree_node *node, - void **slot) +static bool node_tag_get(const struct radix_tree_root *root, + const struct radix_tree_node *node, + unsigned int tag, unsigned int offset) { -#ifdef CONFIG_RADIX_TREE_MULTIORDER - bool exceptional = radix_tree_exceptional_entry(*slot); - void *ptr = node_to_entry(slot); - unsigned offset = get_slot_offset(node, slot); - int i; + if (node) + return tag_get(node, tag, offset); + return root_tag_get(root, tag); +} - for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { - if (node->slots[offset + i] != ptr) - break; - node->slots[offset + i] = NULL; - node->count--; - if (exceptional) - node->exceptional--; +/* + * IDR users want to be able to store NULL in the tree, so if the slot isn't + * free, don't adjust the count, even if it's transitioning between NULL and + * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still + * have empty bits, but it only stores NULL in slots when they're being + * deleted. 
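
Editorial sketch: the accounting rule described in that comment can be stated as a small pure function. This is a hedged model of the decision only, not the kernel helper itself:

/* Contribution of a store to node->count.  For an IDR, a slot that is
 * already allocated contributes nothing even when its value flips
 * between NULL and non-NULL; filling a free slot counts +1.  A plain
 * radix tree just compares emptiness before and after. */
static int count_delta(int is_idr, int slot_free, void *old, void *item)
{
	if (is_idr) {
		if (!slot_free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}
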
+ */ +static int calculate_count(struct radix_tree_root *root, + struct radix_tree_node *node, void **slot, + void *item, void *old) +{ + if (is_idr(root)) { + unsigned offset = get_slot_offset(node, slot); + bool free = node_tag_get(root, node, IDR_FREE, offset); + if (!free) + return 0; + if (!old) + return 1; } -#endif + return !!item - !!old; } /** @@ -1062,15 +1177,19 @@ void __radix_tree_replace(struct radix_tree_root *root, void **slot, void *item, radix_tree_update_node_t update_node, void *private) { - if (!item) - delete_sibling_entries(node, slot); + void *old = rcu_dereference_raw(*slot); + int exceptional = !!radix_tree_exceptional_entry(item) - + !!radix_tree_exceptional_entry(old); + int count = calculate_count(root, node, slot, item, old); + /* * This function supports replacing exceptional entries and * deleting entries, but that needs accounting against the * node unless the slot is root->rnode. */ - replace_slot(root, node, slot, item, - !node && slot != (void **)&root->rnode); + WARN_ON_ONCE(!node && (slot != (void **)&root->rnode) && + (count || exceptional)); + replace_slot(slot, item, node, count, exceptional); if (!node) return; @@ -1100,7 +1219,7 @@ void __radix_tree_replace(struct radix_tree_root *root, void radix_tree_replace_slot(struct radix_tree_root *root, void **slot, void *item) { - replace_slot(root, NULL, slot, item, true); + __radix_tree_replace(root, NULL, slot, item, NULL, NULL); } EXPORT_SYMBOL(radix_tree_replace_slot); @@ -1176,6 +1295,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, void **slot; unsigned int offset, end; unsigned n, tag, tags = 0; + gfp_t gfp = root_gfp_mask(root); if (!__radix_tree_lookup(root, index, &parent, &slot)) return -ENOENT; @@ -1213,7 +1333,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, for (;;) { if (node->shift > order) { - child = radix_tree_node_alloc(root, node, + child = radix_tree_node_alloc(gfp, node, root, node->shift - RADIX_TREE_MAP_SHIFT, offset, 0, 0); if (!child) @@ -1262,6 +1382,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index, } #endif +static void node_tag_set(struct radix_tree_root *root, + struct radix_tree_node *node, + unsigned int tag, unsigned int offset) +{ + while (node) { + if (tag_get(node, tag, offset)) + return; + tag_set(node, tag, offset); + offset = node->offset; + node = node->parent; + } + + if (!root_tag_get(root, tag)) + root_tag_set(root, tag); +} + /** * radix_tree_tag_set - set a tag on a radix tree node * @root: radix tree root @@ -1303,6 +1439,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root, } EXPORT_SYMBOL(radix_tree_tag_set); +/** + * radix_tree_iter_tag_set - set a tag on the current iterator entry + * @root: radix tree root + * @iter: iterator state + * @tag: tag to set + */ +void radix_tree_iter_tag_set(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag) +{ + node_tag_set(root, iter->node, tag, iter_offset(iter)); +} + static void node_tag_clear(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) @@ -1323,34 +1471,6 @@ static void node_tag_clear(struct radix_tree_root *root, root_tag_clear(root, tag); } -static void node_tag_set(struct radix_tree_root *root, - struct radix_tree_node *node, - unsigned int tag, unsigned int offset) -{ - while (node) { - if (tag_get(node, tag, offset)) - return; - tag_set(node, tag, offset); - offset = node->offset; - node = node->parent; - } - - if (!root_tag_get(root, 
tag)) - root_tag_set(root, tag); -} - -/** - * radix_tree_iter_tag_set - set a tag on the current iterator entry - * @root: radix tree root - * @iter: iterator state - * @tag: tag to set - */ -void radix_tree_iter_tag_set(struct radix_tree_root *root, - const struct radix_tree_iter *iter, unsigned int tag) -{ - node_tag_set(root, iter->node, tag, iter_offset(iter)); -} - /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root @@ -1391,6 +1511,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, EXPORT_SYMBOL(radix_tree_tag_clear); /** + * radix_tree_iter_tag_clear - clear a tag on the current iterator entry + * @root: radix tree root + * @iter: iterator state + * @tag: tag to clear + */ +void radix_tree_iter_tag_clear(struct radix_tree_root *root, + const struct radix_tree_iter *iter, unsigned int tag) +{ + node_tag_clear(root, iter->node, tag, iter_offset(iter)); +} + +/** * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key @@ -1405,7 +1537,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear); * the RCU lock is held, unless tag modification and node deletion are excluded * from concurrency. */ -int radix_tree_tag_get(struct radix_tree_root *root, +int radix_tree_tag_get(const struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; @@ -1417,8 +1549,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return 0; - if (node == NULL) - return 0; while (radix_tree_is_internal_node(node)) { unsigned offset; @@ -1426,8 +1556,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); - if (!node) - return 0; if (!tag_get(parent, tag, offset)) return 0; if (node == RADIX_TREE_RETRY) @@ -1454,6 +1582,11 @@ static void set_iter_tags(struct radix_tree_iter *iter, unsigned tag_long = offset / BITS_PER_LONG; unsigned tag_bit = offset % BITS_PER_LONG; + if (!node) { + iter->tags = 1; + return; + } + iter->tags = node->tags[tag][tag_long] >> tag_bit; /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ @@ -1569,7 +1702,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume); * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if iteration is over */ -void **radix_tree_next_chunk(struct radix_tree_root *root, +void **radix_tree_next_chunk(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; @@ -1680,7 +1813,7 @@ EXPORT_SYMBOL(radix_tree_next_chunk); * stored in 'results'. */ unsigned int -radix_tree_gang_lookup(struct radix_tree_root *root, void **results, +radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items) { struct radix_tree_iter iter; @@ -1725,7 +1858,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); * protection, radix_tree_deref_slot may fail requiring a retry. */ unsigned int -radix_tree_gang_lookup_slot(struct radix_tree_root *root, +radix_tree_gang_lookup_slot(const struct radix_tree_root *root, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items) { @@ -1762,7 +1895,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot); * returns the number of items which were placed at *@results. 
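
Editorial sketch: the gang lookups above copy batches of matches into a caller-supplied array; for a complete walk the iterator form is simpler. A hedged usage sketch — MY_TAG and process() are illustrative names, and the caller is assumed to hold rcu_read_lock() as the lookup documentation requires:

static void process_tagged(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, tree, &iter, 0, MY_TAG)
		process(radix_tree_deref_slot(slot));
	rcu_read_unlock();
}
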
*/ unsigned int -radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, +radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag) { @@ -1803,9 +1936,9 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag); * returns the number of slots which were placed at *@results. */ unsigned int -radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, - unsigned long first_index, unsigned int max_items, - unsigned int tag) +radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, + void ***results, unsigned long first_index, + unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; void **slot; @@ -1843,59 +1976,83 @@ void __radix_tree_delete_node(struct radix_tree_root *root, delete_node(root, node, update_node, private); } +static bool __radix_tree_delete(struct radix_tree_root *root, + struct radix_tree_node *node, void **slot) +{ + void *old = rcu_dereference_raw(*slot); + int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; + unsigned offset = get_slot_offset(node, slot); + int tag; + + if (is_idr(root)) + node_tag_set(root, node, IDR_FREE, offset); + else + for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) + node_tag_clear(root, node, tag, offset); + + replace_slot(slot, NULL, node, -1, exceptional); + return node && delete_node(root, node, NULL, NULL); +} + /** - * radix_tree_delete_item - delete an item from a radix tree - * @root: radix tree root - * @index: index key - * @item: expected item + * radix_tree_iter_delete - delete the entry at this iterator position + * @root: radix tree root + * @iter: iterator state + * @slot: pointer to slot * - * Remove @item at @index from the radix tree rooted at @root. + * Delete the entry at the position currently pointed to by the iterator. + * This may result in the current node being freed; if it is, the iterator + * is advanced so that it will not reference the freed memory. This + * function may be called without any locking if there are no other threads + * which can access this tree. + */ +void radix_tree_iter_delete(struct radix_tree_root *root, + struct radix_tree_iter *iter, void **slot) +{ + if (__radix_tree_delete(root, iter->node, slot)) + iter->index = iter->next_index; +} + +/** + * radix_tree_delete_item - delete an item from a radix tree + * @root: radix tree root + * @index: index key + * @item: expected item + * + * Remove @item at @index from the radix tree rooted at @root. * - * Returns the address of the deleted item, or NULL if it was not present - * or the entry at the given @index was not @item. + * Return: the deleted entry, or %NULL if it was not present + * or the entry at the given @index was not @item. */ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { - struct radix_tree_node *node; - unsigned int offset; + struct radix_tree_node *node = NULL; void **slot; void *entry; - int tag; entry = __radix_tree_lookup(root, index, &node, &slot); - if (!entry) + if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, + get_slot_offset(node, slot)))) return NULL; if (item && entry != item) return NULL; - if (!node) { - root_tag_clear_all(root); - root->rnode = NULL; - return entry; - } - - offset = get_slot_offset(node, slot); - - /* Clear all tags associated with the item to be deleted. 
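
Editorial sketch: radix_tree_iter_delete() above is what makes destructive walks safe — the new ida_destroy() earlier in this patch is the in-tree example of the pattern. A minimal standalone version, assuming kmalloc()ed entries and a caller with exclusive access (no locking shown):

static void empty_tree(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, tree, &iter, 0) {
		kfree(*slot);
		/* advances the iterator past any node this frees */
		radix_tree_iter_delete(tree, &iter, slot);
	}
}
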
*/ - for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) - node_tag_clear(root, node, tag, offset); - - __radix_tree_replace(root, node, slot, NULL, NULL, NULL); + __radix_tree_delete(root, node, slot); return entry; } EXPORT_SYMBOL(radix_tree_delete_item); /** - * radix_tree_delete - delete an item from a radix tree - * @root: radix tree root - * @index: index key + * radix_tree_delete - delete an entry from a radix tree + * @root: radix tree root + * @index: index key * - * Remove the item at @index from the radix tree rooted at @root. + * Remove the entry at @index from the radix tree rooted at @root. * - * Returns the address of the deleted item, or NULL if it was not present. + * Return: The deleted entry, or %NULL if it was not present. */ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { @@ -1912,8 +2069,7 @@ void radix_tree_clear_tags(struct radix_tree_root *root, for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); } else { - /* Clear root node tags */ - root->gfp_mask &= __GFP_BITS_MASK; + root_tag_clear_all(root); } } @@ -1922,12 +2078,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root, * @root: radix tree root * @tag: tag to test */ -int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) +int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) { return root_tag_get(root, tag); } EXPORT_SYMBOL(radix_tree_tagged); +/** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading + * + * Preallocate memory to use for the next call to idr_alloc(). This function + * returns with preemption disabled. It will be enabled by idr_preload_end(). + */ +void idr_preload(gfp_t gfp_mask) +{ + __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE); +} +EXPORT_SYMBOL(idr_preload); + +/** + * ida_pre_get - reserve resources for ida allocation + * @ida: ida handle + * @gfp: memory allocation flags + * + * This function should be called before calling ida_get_new_above(). If it + * is unable to allocate memory, it will return %0. On success, it returns %1. + */ +int ida_pre_get(struct ida *ida, gfp_t gfp) +{ + __radix_tree_preload(gfp, IDA_PRELOAD_SIZE); + /* + * The IDA API has no preload_end() equivalent. Instead, + * ida_get_new() can return -EAGAIN, prompting the caller + * to return to the ida_pre_get() step. + */ + preempt_enable(); + + if (!this_cpu_read(ida_bitmap)) { + struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); + if (!bitmap) + return 0; + bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); + kfree(bitmap); + } + + return 1; +} +EXPORT_SYMBOL(ida_pre_get); + +void **idr_get_free(struct radix_tree_root *root, + struct radix_tree_iter *iter, gfp_t gfp, int end) +{ + struct radix_tree_node *node = NULL, *child; + void **slot = (void **)&root->rnode; + unsigned long maxindex, start = iter->next_index; + unsigned long max = end > 0 ? end - 1 : INT_MAX; + unsigned int shift, offset = 0; + + grow: + shift = radix_tree_load_root(root, &child, &maxindex); + if (!radix_tree_tagged(root, IDR_FREE)) + start = max(start, maxindex + 1); + if (start > max) + return ERR_PTR(-ENOSPC); + + if (start > maxindex) { + int error = radix_tree_extend(root, gfp, start, shift); + if (error < 0) + return ERR_PTR(error); + shift = error; + child = root->rnode; + } + + while (shift) { + shift -= RADIX_TREE_MAP_SHIFT; + if (child == NULL) { + /* Have to add a child node. 
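
Editorial sketch: since idr_preload() above returns with preemption disabled, the canonical caller allocates under its own lock with GFP_NOWAIT and then calls idr_preload_end(). A hedged sketch of that pattern; my_store and the lock parameter are illustrative:

static int my_store(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; disables preemption */
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);	/* 0, 0: any id >= 0 */
	spin_unlock(lock);
	idr_preload_end();		/* re-enables preemption */

	return id;			/* id >= 0, or -ENOMEM/-ENOSPC */
}
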
*/ + child = radix_tree_node_alloc(gfp, node, root, shift, + offset, 0, 0); + if (!child) + return ERR_PTR(-ENOMEM); + all_tag_set(child, IDR_FREE); + rcu_assign_pointer(*slot, node_to_entry(child)); + if (node) + node->count++; + } else if (!radix_tree_is_internal_node(child)) + break; + + node = entry_to_node(child); + offset = radix_tree_descend(node, &child, start); + if (!tag_get(node, IDR_FREE, offset)) { + offset = radix_tree_find_next_bit(node, IDR_FREE, + offset + 1); + start = next_index(start, node, offset); + if (start > max) + return ERR_PTR(-ENOSPC); + while (offset == RADIX_TREE_MAP_SIZE) { + offset = node->offset + 1; + node = node->parent; + if (!node) + goto grow; + shift = node->shift; + } + child = node->slots[offset]; + } + slot = &node->slots[offset]; + } + + iter->index = start; + if (node) + iter->next_index = 1 + min(max, (start | node_maxindex(node))); + else + iter->next_index = 1; + iter->node = node; + __set_iter_shift(iter, shift); + set_iter_tags(iter, node, offset, IDR_FREE); + + return slot; +} + +/** + * idr_destroy - release all internal memory from an IDR + * @idr: idr handle + * + * After this function is called, the IDR is empty, and may be reused or + * the data structure containing it may be freed. + * + * A typical clean-up sequence for objects stored in an idr tree will use + * idr_for_each() to free all objects, if necessary, then idr_destroy() to + * free the memory used to keep track of those objects. + */ +void idr_destroy(struct idr *idr) +{ + struct radix_tree_node **slot = &idr->idr_rt.rnode; + if (radix_tree_is_internal_node(*slot)) + radix_tree_free_nodes(*slot); + *slot = NULL; + root_tag_set(&idr->idr_rt, IDR_FREE); +} +EXPORT_SYMBOL(idr_destroy); + static void radix_tree_node_ctor(void *arg) { @@ -1971,10 +2262,12 @@ static int radix_tree_cpu_dead(unsigned int cpu) rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { node = rtp->nodes; - rtp->nodes = node->private_data; + rtp->nodes = node->parent; kmem_cache_free(radix_tree_node_cachep, node); rtp->nr--; } + kfree(per_cpu(ida_bitmap, cpu)); + per_cpu(ida_bitmap, cpu) = NULL; return 0; } diff --git a/mm/workingset.c b/mm/workingset.c index abb58ffa3c64..80c913c89f11 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -354,10 +354,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private) * as node->private_list is protected by &mapping->tree_lock. 
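
Editorial sketch: storing the root pointer in every node (the new ret->root assignment in radix_tree_node_alloc() earlier in this patch) is what lets the shadow-node shrinker recover the address_space without node->private_data, as the workingset hunk just below shows. A standalone model of that container_of() step — the struct names here are stand-ins, not the kernel types:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct root_model { void *rnode; };	/* stands in for radix_tree_root */
struct mapping_model {			/* stands in for address_space */
	int flags;
	struct root_model page_tree;
};

int main(void)
{
	struct mapping_model m;
	struct root_model *root = &m.page_tree;	/* what node->root points at */

	/* recover the containing mapping from the embedded root */
	assert(container_of(root, struct mapping_model, page_tree) == &m);
	return 0;
}
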
*/ if (node->count && node->count == node->exceptional) { - if (list_empty(&node->private_list)) { - node->private_data = mapping; + if (list_empty(&node->private_list)) list_lru_add(&shadow_nodes, &node->private_list); - } } else { if (!list_empty(&node->private_list)) list_lru_del(&shadow_nodes, &node->private_list); @@ -435,7 +433,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, */ node = container_of(item, struct radix_tree_node, private_list); - mapping = node->private_data; + mapping = container_of(node->root, struct address_space, page_tree); /* Coming from the list, invert the lock order */ if (!spin_trylock(&mapping->tree_lock)) { diff --git a/net/mac80211/status.c b/net/mac80211/status.c index a3af6e1bfd98..0dd7c351002d 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, unsigned long flags; spin_lock_irqsave(&local->ack_status_lock, flags); - skb = idr_find(&local->ack_status_frames, info->ack_frame_id); - if (skb) - idr_remove(&local->ack_status_frames, info->ack_frame_id); + skb = idr_remove(&local->ack_status_frames, info->ack_frame_id); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (!skb) diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h index 18663f59d72f..68b8c1516c5a 100644 --- a/tools/include/asm-generic/bitops/atomic.h +++ b/tools/include/asm-generic/bitops/atomic.h @@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0; } +#define __set_bit(nr, addr) set_bit(nr, addr) +#define __clear_bit(nr, addr) clear_bit(nr, addr) + #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */ diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h index beda1a884b50..4790f047a89c 100644 --- a/tools/include/asm/bug.h +++ b/tools/include/asm/bug.h @@ -12,6 +12,14 @@ unlikely(__ret_warn_on); \ }) +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf("assertion failed at %s:%d\n", \ + __FILE__, __LINE__); \ + unlikely(__ret_warn_on); \ +}) + #define WARN_ON_ONCE(condition) ({ \ static int __warned; \ int __ret_warn_once = !!(condition); \ diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h index eef41d500e9e..e8b9f518e36b 100644 --- a/tools/include/linux/bitmap.h +++ b/tools/include/linux/bitmap.h @@ -4,6 +4,7 @@ #include <string.h> #include <linux/bitops.h> #include <stdlib.h> +#include <linux/kernel.h> #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index fc446343ff41..1aecad369af5 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -2,7 +2,6 @@ #define _TOOLS_LINUX_BITOPS_H_ #include <asm/types.h> -#include <linux/kernel.h> #include <linux/compiler.h> #ifndef __WORDSIZE diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index e33fc1df3935..9d294161c494 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -21,6 +21,8 @@ #endif #define __user +#define __rcu +#define __read_mostly #ifndef __attribute_const__ # define __attribute_const__ @@ -50,6 +52,8 @@ # define unlikely(x) __builtin_expect(!!(x), 0) #endif +#define uninitialized_var(x) x = *(&(x)) + #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) #include <linux/types.h> diff --git 
a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h new file mode 100644 index 000000000000..58397dcb19d6 --- /dev/null +++ b/tools/include/linux/spinlock.h @@ -0,0 +1,5 @@ +#define spinlock_t pthread_mutex_t +#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; + +#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) +#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore index 11d888ca6a92..26dedaf57aab 100644 --- a/tools/testing/radix-tree/.gitignore +++ b/tools/testing/radix-tree/.gitignore @@ -1,2 +1,5 @@ +idr.c +idr-test main +multiorder radix-tree.c diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index 3635e4d3eca7..4aba7b75e43a 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -1,10 +1,10 @@ CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE LDFLAGS += -lpthread -lurcu -TARGETS = main -OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ - regression1.o regression2.o regression3.o multiorder.o \ - iteration_check.o benchmark.o +TARGETS = main idr-test multiorder +CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o +OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ + tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o ifdef BENCHMARK CFLAGS += -DBENCHMARK=1 @@ -13,17 +13,27 @@ endif targets: $(TARGETS) main: $(OFILES) - $(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main + +idr-test: idr-test.o $(CORE_OFILES) + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test + +multiorder: multiorder.o $(CORE_OFILES) + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder clean: - $(RM) -f $(TARGETS) *.o radix-tree.c + $(RM) $(TARGETS) *.o radix-tree.c idr.c -find_next_bit.o: ../../lib/find_bit.c - $(CC) $(CFLAGS) -c -o $@ $< +vpath %.c ../../lib $(OFILES): *.h */*.h \ ../../include/linux/*.h \ - ../../../include/linux/radix-tree.h + ../../include/asm/*.h \ + ../../../include/linux/radix-tree.h \ + ../../../include/linux/idr.h radix-tree.c: ../../../lib/radix-tree.c sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ + +idr.c: ../../../lib/idr.c + sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c index 215ca86c7605..9b09ddfe462f 100644 --- a/tools/testing/radix-tree/benchmark.c +++ b/tools/testing/radix-tree/benchmark.c @@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order) tagged = benchmark_iter(&tree, true); normal = benchmark_iter(&tree, false); - printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", + printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", size, step, order, tagged, normal); item_kill_tree(&tree); @@ -85,8 +85,8 @@ void benchmark(void) 128, 256, 512, 12345, 0}; int c, s; - printf("starting benchmarks\n"); - printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); + printv(1, "starting benchmarks\n"); + printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); for (c = 0; size[c]; c++) for (s = 0; step[s]; s++) diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h index ad18cf5a2a3a..cf88dc5b8832 100644 --- a/tools/testing/radix-tree/generated/autoconf.h +++ 
b/tools/testing/radix-tree/generated/autoconf.h @@ -1,3 +1 @@ #define CONFIG_RADIX_TREE_MULTIORDER 1 -#define CONFIG_SHMEM 1 -#define CONFIG_SWAP 1 diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c new file mode 100644 index 000000000000..d15c44156812 --- /dev/null +++ b/tools/testing/radix-tree/idr-test.c @@ -0,0 +1,424 @@ +/* + * idr-test.c: Test the IDR API + * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include <linux/bitmap.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/errno.h> + +#include "test.h" + +#define DUMMY_PTR ((void *)0x12) + +int item_idr_free(int id, void *p, void *data) +{ + struct item *item = p; + assert(item->index == id); + free(p); + + return 0; +} + +void item_idr_remove(struct idr *idr, int id) +{ + struct item *item = idr_find(idr, id); + assert(item->index == id); + idr_remove(idr, id); + free(item); +} + +void idr_alloc_test(void) +{ + unsigned long i; + DEFINE_IDR(idr); + + assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0); + assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd); + idr_remove(&idr, 0x3ffd); + idr_remove(&idr, 0); + + for (i = 0x3ffe; i < 0x4003; i++) { + int id; + struct item *item; + + if (i < 0x4000) + item = item_create(i, 0); + else + item = item_create(i - 0x3fff, 0); + + id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL); + assert(id == item->index); + } + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); +} + +void idr_replace_test(void) +{ + DEFINE_IDR(idr); + + idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL); + idr_replace(&idr, &idr, 10); + + idr_destroy(&idr); +} + +/* + * Unlike the radix tree, you can put a NULL pointer -- with care -- into + * the IDR. Some interfaces, like idr_find() do not distinguish between + * "present, value is NULL" and "not present", but that's exactly what some + * users want. 
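
Editorial sketch: a hedged illustration of why that distinction matters, mirroring the assertions in idr_null_test() below — a lookup cannot tell "present, value NULL" from "absent", but the allocator still treats the id as taken:

static void null_demo(void)
{
	DEFINE_IDR(idr);

	/* id 0 becomes present, with value NULL */
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	/* a find cannot distinguish this from an absent id... */
	assert(idr_find(&idr, 0) == NULL);
	/* ...but the allocator can: id 0 is taken, so the next id is 1 */
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 1);
	idr_destroy(&idr);
}
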
+ */ +void idr_null_test(void) +{ + int i; + DEFINE_IDR(idr); + + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(!idr_is_empty(&idr)); + idr_remove(&idr, 0); + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(!idr_is_empty(&idr)); + idr_destroy(&idr); + assert(idr_is_empty(&idr)); + + for (i = 0; i < 10; i++) { + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i); + } + + assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL); + assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL); + assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR); + assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT)); + idr_remove(&idr, 5); + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5); + idr_remove(&idr, 5); + + for (i = 0; i < 9; i++) { + idr_remove(&idr, i); + assert(!idr_is_empty(&idr)); + } + idr_remove(&idr, 8); + assert(!idr_is_empty(&idr)); + idr_remove(&idr, 9); + assert(idr_is_empty(&idr)); + + assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0); + assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT)); + assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL); + assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR); + + idr_destroy(&idr); + assert(idr_is_empty(&idr)); + + for (i = 1; i < 10; i++) { + assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i); + } + + idr_destroy(&idr); + assert(idr_is_empty(&idr)); +} + +void idr_checks(void) +{ + unsigned long i; + DEFINE_IDR(idr); + + for (i = 0; i < 10000; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i); + } + + assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0); + + for (i = 0; i < 5000; i++) + item_idr_remove(&idr, i); + + idr_remove(&idr, 3); + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + + assert(idr_is_empty(&idr)); + + idr_remove(&idr, 3); + idr_remove(&idr, 0); + + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); + } + assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC); + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + idr_destroy(&idr); + + assert(idr_is_empty(&idr)); + + for (i = 1; i < 10000; i++) { + struct item *item = item_create(i, 0); + assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i); + } + + idr_for_each(&idr, item_idr_free, &idr); + idr_destroy(&idr); + + idr_replace_test(); + idr_alloc_test(); + idr_null_test(); +} + +/* + * Check that we get the correct error when we run out of memory doing + * allocations. To ensure we run out of memory, just "forget" to preload. + * The first test is for not having a bitmap available, and the second test + * is for not being able to allocate a level of the radix tree. + */ +void ida_check_nomem(void) +{ + DEFINE_IDA(ida); + int id, err; + + err = ida_get_new_above(&ida, 256, &id); + assert(err == -EAGAIN); + err = ida_get_new_above(&ida, 1UL << 30, &id); + assert(err == -EAGAIN); +} + +/* + * Check what happens when we fill a leaf and then delete it. This may + * discover mishandling of IDR_FREE. 
+ */ +void ida_check_leaf(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == 0); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); +} + +/* + * Check handling of conversions between exceptional entries and full bitmaps. + */ +void ida_check_conv(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, i + 1, &id)); + assert(id == i + 1); + assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id)); + assert(id == i + BITS_PER_LONG); + ida_remove(&ida, i + 1); + ida_remove(&ida, i + BITS_PER_LONG); + assert(ida_is_empty(&ida)); + } + + assert(ida_pre_get(&ida, GFP_KERNEL)); + + for (i = 0; i < IDA_BITMAP_BITS * 2; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + for (i = IDA_BITMAP_BITS * 2; i > 0; i--) { + ida_remove(&ida, i - 1); + } + assert(ida_is_empty(&ida)); + + for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) { + ida_remove(&ida, i - 1); + } + assert(ida_is_empty(&ida)); + + radix_tree_cpu_dead(1); + for (i = 0; i < 1000000; i++) { + int err = ida_get_new(&ida, &id); + if (err == -EAGAIN) { + assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2)); + assert(ida_pre_get(&ida, GFP_KERNEL)); + err = ida_get_new(&ida, &id); + } else { + assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2)); + } + assert(!err); + assert(id == i); + } + ida_destroy(&ida); +} + +/* + * Check allocations up to and slightly above the maximum allowed (2^31-1) ID. + * Allocating up to 2^31-1 should succeed, and then allocating the next one + * should fail. 
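
Editorial sketch: the arithmetic exercised by ida_check_max() below — ids are ints, so the last allocatable id is INT_MAX = 2^31 - 1, and starting at base = (1UL << 31) - j leaves room for exactly j ids. A one-line boundary check, assuming 32-bit int:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long j = 65536;
	unsigned long base = (1UL << 31) - j;

	/* the j ids base, base+1, ..., base+j-1 end exactly at INT_MAX */
	assert(base + j - 1 == INT_MAX);
	return 0;
}
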
+ */ +void ida_check_max(void) +{ + DEFINE_IDA(ida); + int id, err; + unsigned long i, j; + + for (j = 1; j < 65537; j *= 2) { + unsigned long base = (1UL << 31) - j; + for (i = 0; i < j; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, base, &id)); + assert(id == base + i); + } + assert(ida_pre_get(&ida, GFP_KERNEL)); + err = ida_get_new_above(&ida, base, &id); + assert(err == -ENOSPC); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + rcu_barrier(); + } +} + +void ida_check_random(void) +{ + DEFINE_IDA(ida); + DECLARE_BITMAP(bitmap, 2048); + int id; + unsigned int i; + time_t s = time(NULL); + + repeat: + memset(bitmap, 0, sizeof(bitmap)); + for (i = 0; i < 100000; i++) { + int i = rand(); + int bit = i & 2047; + if (test_bit(bit, bitmap)) { + __clear_bit(bit, bitmap); + ida_remove(&ida, bit); + } else { + __set_bit(bit, bitmap); + ida_pre_get(&ida, GFP_KERNEL); + assert(!ida_get_new_above(&ida, bit, &id)); + assert(id == bit); + } + } + ida_destroy(&ida); + if (time(NULL) < s + 10) + goto repeat; +} + +void ida_checks(void) +{ + DEFINE_IDA(ida); + int id; + unsigned long i; + + ida_check_nomem(); + + for (i = 0; i < 10000; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + assert(id == i); + } + + ida_remove(&ida, 20); + ida_remove(&ida, 21); + for (i = 0; i < 3; i++) { + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new(&ida, &id)); + if (i == 2) + assert(id == 10000); + } + + for (i = 0; i < 5000; i++) + ida_remove(&ida, i); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 5000, &id)); + assert(id == 10001); + + ida_destroy(&ida); + + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + assert(id == 1); + + ida_remove(&ida, id); + assert(ida_is_empty(&ida)); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1, &id)); + assert(id == 1); + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 1025, &id)); + assert(id == 1025); + assert(ida_pre_get(&ida, GFP_KERNEL)); + assert(!ida_get_new_above(&ida, 10000, &id)); + assert(id == 10000); + ida_remove(&ida, 1025); + ida_destroy(&ida); + assert(ida_is_empty(&ida)); + + ida_check_leaf(); + ida_check_max(); + ida_check_conv(); + ida_check_random(); + + radix_tree_cpu_dead(1); +} + +int __weak main(void) +{ + radix_tree_init(); + idr_checks(); + ida_checks(); + rcu_barrier(); + if (nr_allocated) + printf("nr_allocated = %d\n", nr_allocated); + return 0; +} diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c index 7572b7ed930e..a92bab513701 100644 --- a/tools/testing/radix-tree/iteration_check.c +++ b/tools/testing/radix-tree/iteration_check.c @@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration) { int i; - printf("Running %siteration tests for %d seconds\n", + printv(1, "Running %siteration tests for %d seconds\n", order > 0 ? 
"multiorder " : "", test_duration); max_order = order; diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index d31ea7c9abec..cf48c8473f48 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -5,7 +5,7 @@ #include <unistd.h> #include <assert.h> -#include <linux/mempool.h> +#include <linux/gfp.h> #include <linux/poison.h> #include <linux/slab.h> #include <linux/radix-tree.h> @@ -13,6 +13,8 @@ int nr_allocated; int preempt_count; +int kmalloc_verbose; +int test_verbose; struct kmem_cache { pthread_mutex_t lock; @@ -22,27 +24,6 @@ struct kmem_cache { void (*ctor)(void *); }; -void *mempool_alloc(mempool_t *pool, int gfp_mask) -{ - return pool->alloc(gfp_mask, pool->data); -} - -void mempool_free(void *element, mempool_t *pool) -{ - pool->free(element, pool->data); -} - -mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data) -{ - mempool_t *ret = malloc(sizeof(*ret)); - - ret->alloc = alloc_fn; - ret->free = free_fn; - ret->data = pool_data; - return ret; -} - void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) { struct radix_tree_node *node; @@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) if (cachep->nr_objs) { cachep->nr_objs--; node = cachep->objs; - cachep->objs = node->private_data; + cachep->objs = node->parent; pthread_mutex_unlock(&cachep->lock); - node->private_data = NULL; + node->parent = NULL; } else { pthread_mutex_unlock(&cachep->lock); node = malloc(cachep->size); @@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) } uatomic_inc(&nr_allocated); + if (kmalloc_verbose) + printf("Allocating %p from slab\n", node); return node; } @@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) { assert(objp); uatomic_dec(&nr_allocated); + if (kmalloc_verbose) + printf("Freeing %p to slab\n", objp); pthread_mutex_lock(&cachep->lock); if (cachep->nr_objs > 10) { memset(objp, POISON_FREE, cachep->size); @@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) } else { struct radix_tree_node *node = objp; cachep->nr_objs++; - node->private_data = cachep->objs; + node->parent = cachep->objs; cachep->objs = node; } pthread_mutex_unlock(&cachep->lock); @@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp) { void *ret = malloc(size); uatomic_inc(&nr_allocated); + if (kmalloc_verbose) + printf("Allocating %p from malloc\n", ret); return ret; } @@ -97,6 +84,8 @@ void kfree(void *p) if (!p) return; uatomic_dec(&nr_allocated); + if (kmalloc_verbose) + printf("Freeing %p to malloc\n", p); free(p); } diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h deleted file mode 100644 index a13e9bc76eec..000000000000 --- a/tools/testing/radix-tree/linux/bitops.h +++ /dev/null @@ -1,160 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ - -#include <linux/types.h> -#include <linux/bitops/find.h> -#include <linux/bitops/hweight.h> -#include <linux/kernel.h> - -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) -#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_PER_BYTE 8 -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. 
- * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p |= mask; -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p &= ~mask; -} - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p ^= mask; -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int num = 0; - - if ((word & 0xffffffff) == 0) { - num += 32; - word >>= 32; - } - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, - unsigned long offset); - -static inline unsigned long hweight_long(unsigned long w) -{ - return sizeof(w) == 4 ? 
hweight32(w) : hweight64(w); -} - -#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/__ffs.h b/tools/testing/radix-tree/linux/bitops/__ffs.h deleted file mode 100644 index 9a3274aecf83..000000000000 --- a/tools/testing/radix-tree/linux/bitops/__ffs.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS___FFS_H_ -#define _ASM_GENERIC_BITOPS___FFS_H_ - -#include <asm/types.h> - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - int num = 0; - -#if BITS_PER_LONG == 64 - if ((word & 0xffffffff) == 0) { - num += 32; - word >>= 32; - } -#endif - if ((word & 0xffff) == 0) { - num += 16; - word >>= 16; - } - if ((word & 0xff) == 0) { - num += 8; - word >>= 8; - } - if ((word & 0xf) == 0) { - num += 4; - word >>= 4; - } - if ((word & 0x3) == 0) { - num += 2; - word >>= 2; - } - if ((word & 0x1) == 0) - num += 1; - return num; -} - -#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/ffs.h b/tools/testing/radix-tree/linux/bitops/ffs.h deleted file mode 100644 index fbbb43af7dc0..000000000000 --- a/tools/testing/radix-tree/linux/bitops/ffs.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FFS_H_ -#define _ASM_GENERIC_BITOPS_FFS_H_ - -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -static inline int ffs(int x) -{ - int r = 1; - - if (!x) - return 0; - if (!(x & 0xffff)) { - x >>= 16; - r += 16; - } - if (!(x & 0xff)) { - x >>= 8; - r += 8; - } - if (!(x & 0xf)) { - x >>= 4; - r += 4; - } - if (!(x & 3)) { - x >>= 2; - r += 2; - } - if (!(x & 1)) { - x >>= 1; - r += 1; - } - return r; -} - -#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/ffz.h b/tools/testing/radix-tree/linux/bitops/ffz.h deleted file mode 100644 index 6744bd4cdf46..000000000000 --- a/tools/testing/radix-tree/linux/bitops/ffz.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FFZ_H_ -#define _ASM_GENERIC_BITOPS_FFZ_H_ - -/* - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. 
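Both __ffs fallbacks being deleted here (and the ffs() variant above) use the same binary-narrowing scan: if the low half of the word is empty, shift it away and add the half-width to the running count. A self-contained check of that logic against the compiler builtin; this assumes a GCC/Clang-style __builtin_ctzll() and is illustration, not part of the patch:

#include <assert.h>
#include <stdio.h>

/* Same narrowing scan as the deleted __ffs(), written on a fixed
 * 64-bit type so the 32-bit shift is always well defined.
 * Result is undefined for word == 0, exactly as documented above. */
static unsigned my_ffs64(unsigned long long word)
{
        unsigned num = 0;

        if ((word & 0xffffffffULL) == 0) { num += 32; word >>= 32; }
        if ((word & 0xffffULL) == 0)     { num += 16; word >>= 16; }
        if ((word & 0xffULL) == 0)       { num += 8;  word >>= 8; }
        if ((word & 0xfULL) == 0)        { num += 4;  word >>= 4; }
        if ((word & 0x3ULL) == 0)        { num += 2;  word >>= 2; }
        if ((word & 0x1ULL) == 0)        num += 1;
        return num;
}

int main(void)
{
        for (unsigned i = 0; i < 64; i++) {
                unsigned long long w = 1ULL << i;

                assert(my_ffs64(w) == (unsigned)__builtin_ctzll(w));
        }
        printf("narrowing scan agrees with __builtin_ctzll\n");
        return 0;
}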
- */ -#define ffz(x) __ffs(~(x)) - -#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/find.h b/tools/testing/radix-tree/linux/bitops/find.h deleted file mode 100644 index 72a51e5a12ef..000000000000 --- a/tools/testing/radix-tree/linux/bitops/find.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FIND_H_ -#define _ASM_GENERIC_BITOPS_FIND_H_ - -extern unsigned long find_next_bit(const unsigned long *addr, unsigned long - size, unsigned long offset); - -extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned - long size, unsigned long offset); - -#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) -#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) - -#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/fls.h b/tools/testing/radix-tree/linux/bitops/fls.h deleted file mode 100644 index 850859bc5069..000000000000 --- a/tools/testing/radix-tree/linux/bitops/fls.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FLS_H_ -#define _ASM_GENERIC_BITOPS_FLS_H_ - -/** - * fls - find last (most-significant) bit set - * @x: the word to search - * - * This is defined the same way as ffs. - * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. - */ - -static inline int fls(int x) -{ - int r = 32; - - if (!x) - return 0; - if (!(x & 0xffff0000u)) { - x <<= 16; - r -= 16; - } - if (!(x & 0xff000000u)) { - x <<= 8; - r -= 8; - } - if (!(x & 0xf0000000u)) { - x <<= 4; - r -= 4; - } - if (!(x & 0xc0000000u)) { - x <<= 2; - r -= 2; - } - if (!(x & 0x80000000u)) { - x <<= 1; - r -= 1; - } - return r; -} - -#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/fls64.h b/tools/testing/radix-tree/linux/bitops/fls64.h deleted file mode 100644 index 1b6b17ce2428..000000000000 --- a/tools/testing/radix-tree/linux/bitops/fls64.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ -#define _ASM_GENERIC_BITOPS_FLS64_H_ - -#include <asm/types.h> - -static inline int fls64(__u64 x) -{ - __u32 h = x >> 32; - if (h) - return fls(h) + 32; - return fls(x); -} - -#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/hweight.h b/tools/testing/radix-tree/linux/bitops/hweight.h deleted file mode 100644 index fbbc383771da..000000000000 --- a/tools/testing/radix-tree/linux/bitops/hweight.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ -#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ - -#include <asm/types.h> - -extern unsigned int hweight32(unsigned int w); -extern unsigned int hweight16(unsigned int w); -extern unsigned int hweight8(unsigned int w); -extern unsigned long hweight64(__u64 w); - -#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/le.h b/tools/testing/radix-tree/linux/bitops/le.h deleted file mode 100644 index b9c7e5d2d2ad..000000000000 --- a/tools/testing/radix-tree/linux/bitops/le.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_LE_H_ -#define _ASM_GENERIC_BITOPS_LE_H_ - -#include <asm/types.h> -#include <asm/byteorder.h> - -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) - -#if defined(__LITTLE_ENDIAN) - -#define generic_test_le_bit(nr, addr) test_bit(nr, addr) -#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) -#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) - -#define generic_test_and_set_le_bit(nr, addr) 
test_and_set_bit(nr, addr) -#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) - -#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) -#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) - -#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) - -#elif defined(__BIG_ENDIAN) - -#define generic_test_le_bit(nr, addr) \ - test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___set_le_bit(nr, addr) \ - __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___clear_le_bit(nr, addr) \ - __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -#define generic_test_and_set_le_bit(nr, addr) \ - test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic_test_and_clear_le_bit(nr, addr) \ - test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -#define generic___test_and_set_le_bit(nr, addr) \ - __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) -#define generic___test_and_clear_le_bit(nr, addr) \ - __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) - -extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); - -#else -#error "Please fix <asm/byteorder.h>" -#endif - -#define generic_find_first_zero_le_bit(addr, size) \ - generic_find_next_zero_le_bit((addr), (size), 0) - -#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h deleted file mode 100644 index 6a1bcb9d2c4a..000000000000 --- a/tools/testing/radix-tree/linux/bitops/non-atomic.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ - -#include <asm/types.h> - -#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p |= mask; -} - -static inline void __clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p &= ~mask; -} - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - - *p ^= mask; -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. 
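The le.h shim above handles big-endian hosts by remapping bit numbers instead of byte-swapping data: XOR-ing nr with BITOP_LE_SWIZZLE, i.e. (BITS_PER_LONG-1) & ~0x7, reverses the byte index within each long while leaving the bit position inside the byte untouched. A small demonstration of the mapping (illustrative only, not from the patch):

#include <stdio.h>

#define BITS_PER_LONG           (8 * (unsigned)sizeof(long))
#define BITOP_LE_SWIZZLE        ((BITS_PER_LONG - 1) & ~0x7u)

int main(void)
{
        /* Walk one bit per byte; only the byte index changes. */
        for (unsigned nr = 0; nr < BITS_PER_LONG; nr += 8)
                printf("LE bit %2u -> native bit %2u (byte %u)\n",
                       nr, nr ^ BITOP_LE_SWIZZLE,
                       (nr ^ BITOP_LE_SWIZZLE) / 8);
        return 0;
}

On a little-endian host no remapping is needed, which is why the __LITTLE_ENDIAN branch above defines the generic_*_le_bit helpers as straight aliases of the native bitops.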
- */ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline int test_bit(int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} - -#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/tools/testing/radix-tree/linux/export.h b/tools/testing/radix-tree/linux/export.h deleted file mode 100644 index b6afd131998d..000000000000 --- a/tools/testing/radix-tree/linux/export.h +++ /dev/null @@ -1,2 +0,0 @@ - -#define EXPORT_SYMBOL(sym) diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h index 5b09b2ce6c33..39a0dcb9475a 100644 --- a/tools/testing/radix-tree/linux/gfp.h +++ b/tools/testing/radix-tree/linux/gfp.h @@ -1,6 +1,8 @@ #ifndef _GFP_H #define _GFP_H +#include <linux/types.h> + #define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) @@ -13,10 +15,12 @@ #define __GFP_DIRECT_RECLAIM 0x400000u #define __GFP_KSWAPD_RECLAIM 0x2000000u -#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) +#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) + +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) -#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) -#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { diff --git a/tools/testing/radix-tree/linux/idr.h b/tools/testing/radix-tree/linux/idr.h new file mode 100644 index 000000000000..4e342f2e37cf --- /dev/null +++ b/tools/testing/radix-tree/linux/idr.h @@ -0,0 +1 @@ +#include "../../../../include/linux/idr.h" diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h index 360cabb3c4e7..1bb0afc21309 100644 --- a/tools/testing/radix-tree/linux/init.h +++ b/tools/testing/radix-tree/linux/init.h @@ -1 +1 @@ -/* An empty file stub that allows radix-tree.c to compile. 
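In the gfp.h stub above, the public constants are composed from primitive __GFP_* bits, and the only bit gfpflags_allow_blocking() tests is __GFP_DIRECT_RECLAIM: GFP_KERNEL carries it, GFP_ATOMIC and the newly added GFP_NOWAIT do not. A compilable sketch of that relationship; the two reclaim values are taken from the hunk, while the remaining __GFP_* values are placeholders chosen for illustration, not real kernel values:

#include <assert.h>
#include <stdbool.h>

typedef unsigned gfp_t;

#define __GFP_HIGH              0x20u           /* placeholder value */
#define __GFP_IO                0x40u           /* placeholder value */
#define __GFP_FS                0x80u           /* placeholder value */
#define __GFP_ATOMIC            0x80000u        /* placeholder value */
#define __GFP_DIRECT_RECLAIM    0x400000u       /* from the hunk above */
#define __GFP_KSWAPD_RECLAIM    0x2000000u      /* from the hunk above */
#define __GFP_RECLAIM   (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

#define GFP_ATOMIC      (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL      (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT      (__GFP_KSWAPD_RECLAIM)

static bool gfpflags_allow_blocking(gfp_t flags)
{
        return flags & __GFP_DIRECT_RECLAIM;
}

int main(void)
{
        assert(gfpflags_allow_blocking(GFP_KERNEL));
        assert(!gfpflags_allow_blocking(GFP_ATOMIC));
        assert(!gfpflags_allow_blocking(GFP_NOWAIT));
        return 0;
}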
*/ +#define __init diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h index 9b43b4975d83..677b8c0f60f9 100644 --- a/tools/testing/radix-tree/linux/kernel.h +++ b/tools/testing/radix-tree/linux/kernel.h @@ -1,14 +1,15 @@ #ifndef _KERNEL_H #define _KERNEL_H -#include <assert.h> +#include "../../include/linux/kernel.h" #include <string.h> #include <stdio.h> -#include <stddef.h> #include <limits.h> -#include "../../include/linux/compiler.h" -#include "../../include/linux/err.h" +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/bitops.h> +#include <linux/log2.h> #include "../../../include/linux/kconfig.h" #ifdef BENCHMARK @@ -17,48 +18,10 @@ #define RADIX_TREE_MAP_SHIFT 3 #endif -#ifndef NULL -#define NULL 0 -#endif - -#define BUG_ON(expr) assert(!(expr)) -#define WARN_ON(expr) assert(!(expr)) -#define __init -#define __must_check -#define panic(expr) #define printk printf -#define __force -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define pr_debug printk - -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define cpu_relax() barrier() +#define pr_cont printk #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type, member) );}) -#define min(a, b) ((a) < (b) ? (a) : (b)) - -#define cond_resched() sched_yield() - -static inline int in_interrupt(void) -{ - return 0; -} - -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - -#define xchg(ptr, x) uatomic_xchg(ptr, x) - #endif /* _KERNEL_H */ diff --git a/tools/testing/radix-tree/linux/mempool.h b/tools/testing/radix-tree/linux/mempool.h deleted file mode 100644 index 6a2dc55b41d6..000000000000 --- a/tools/testing/radix-tree/linux/mempool.h +++ /dev/null @@ -1,16 +0,0 @@ - -#include <linux/slab.h> - -typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data); -typedef void (mempool_free_t)(void *element, void *pool_data); - -typedef struct { - mempool_alloc_t *alloc; - mempool_free_t *free; - void *data; -} mempool_t; - -void *mempool_alloc(mempool_t *pool, int gfp_mask); -void mempool_free(void *element, mempool_t *pool); -mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); diff --git a/tools/testing/radix-tree/linux/percpu.h b/tools/testing/radix-tree/linux/percpu.h index 5837f1d56f17..3ea01a1a88c2 100644 --- a/tools/testing/radix-tree/linux/percpu.h +++ b/tools/testing/radix-tree/linux/percpu.h @@ -1,7 +1,10 @@ - +#define DECLARE_PER_CPU(type, val) extern type val #define DEFINE_PER_CPU(type, val) type val #define __get_cpu_var(var) var #define this_cpu_ptr(var) var +#define this_cpu_read(var) var +#define this_cpu_xchg(var, val) uatomic_xchg(&var, val) +#define this_cpu_cmpxchg(var, old, new) uatomic_cmpxchg(&var, old, new) #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h index 65c04c226965..35c5ac81529f 100644 --- a/tools/testing/radix-tree/linux/preempt.h +++ 
b/tools/testing/radix-tree/linux/preempt.h @@ -1,4 +1,14 @@ +#ifndef __LINUX_PREEMPT_H +#define __LINUX_PREEMPT_H + extern int preempt_count; #define preempt_disable() uatomic_inc(&preempt_count) #define preempt_enable() uatomic_dec(&preempt_count) + +static inline int in_interrupt(void) +{ + return 0; +} + +#endif /* __LINUX_PREEMPT_H */ diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h index ce694ddd4aea..ddd135fa3af7 100644 --- a/tools/testing/radix-tree/linux/radix-tree.h +++ b/tools/testing/radix-tree/linux/radix-tree.h @@ -1 +1,24 @@ +#ifndef _TEST_RADIX_TREE_H +#define _TEST_RADIX_TREE_H #include "../../../../include/linux/radix-tree.h" + +extern int kmalloc_verbose; +extern int test_verbose; + +static inline void trace_call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head)) +{ + if (kmalloc_verbose) + printf("Delaying free of %p to slab\n", (char *)head - + offsetof(struct radix_tree_node, rcu_head)); + call_rcu(head, func); +} + +#define printv(verbosity_level, fmt, ...) \ + if(test_verbose >= verbosity_level) \ + printf(fmt, ##__VA_ARGS__) + +#undef call_rcu +#define call_rcu(x, y) trace_call_rcu(x, y) + +#endif /* _TEST_RADIX_TREE_H */ diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h deleted file mode 100644 index 8491d89873bb..000000000000 --- a/tools/testing/radix-tree/linux/types.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _TYPES_H -#define _TYPES_H - -#include "../../include/linux/types.h" - -#define __rcu -#define __read_mostly - -static inline void INIT_LIST_HEAD(struct list_head *list) -{ - list->next = list; - list->prev = list; -} - -typedef struct { - unsigned int x; -} spinlock_t; - -#define uninitialized_var(x) x = x - -#include <linux/gfp.h> - -#endif diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index f7e9801a6754..86de448b699e 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c @@ -3,6 +3,7 @@ #include <unistd.h> #include <time.h> #include <assert.h> +#include <limits.h> #include <linux/slab.h> #include <linux/radix-tree.h> @@ -67,7 +68,7 @@ void big_gang_check(bool long_run) for (i = 0; i < (long_run ? 
1000 : 3); i++) { __big_gang_check(); - printf("%d ", i); + printv(2, "%d ", i); fflush(stdout); } } @@ -128,14 +129,19 @@ void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsign putchar('.'); */ if (idx[i] < start || idx[i] > end) { if (item_tag_get(tree, idx[i], totag)) { - printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); + printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, + end, idx[i], item_tag_get(tree, idx[i], + fromtag), + item_tag_get(tree, idx[i], totag)); } assert(!item_tag_get(tree, idx[i], totag)); continue; } if (item_tag_get(tree, idx[i], fromtag) ^ item_tag_get(tree, idx[i], totag)) { - printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); + printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end, + idx[i], item_tag_get(tree, idx[i], fromtag), + item_tag_get(tree, idx[i], totag)); } assert(!(item_tag_get(tree, idx[i], fromtag) ^ item_tag_get(tree, idx[i], totag))); @@ -237,7 +243,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index, item = item_lookup(tree, index); index2 = find_item(tree, item); if (index != index2) { - printf("index %ld order %d inserted; found %ld\n", + printv(2, "index %ld order %d inserted; found %ld\n", index, order, index2); abort(); } @@ -288,43 +294,48 @@ static void single_thread_tests(bool long_run) { int i; - printf("starting single_thread_tests: %d allocated, preempt %d\n", + printv(1, "starting single_thread_tests: %d allocated, preempt %d\n", nr_allocated, preempt_count); multiorder_checks(); rcu_barrier(); - printf("after multiorder_check: %d allocated, preempt %d\n", + printv(2, "after multiorder_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); locate_check(); rcu_barrier(); - printf("after locate_check: %d allocated, preempt %d\n", + printv(2, "after locate_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); tag_check(); rcu_barrier(); - printf("after tag_check: %d allocated, preempt %d\n", + printv(2, "after tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); gang_check(); rcu_barrier(); - printf("after gang_check: %d allocated, preempt %d\n", + printv(2, "after gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); add_and_check(); rcu_barrier(); - printf("after add_and_check: %d allocated, preempt %d\n", + printv(2, "after add_and_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); dynamic_height_check(); rcu_barrier(); - printf("after dynamic_height_check: %d allocated, preempt %d\n", + printv(2, "after dynamic_height_check: %d allocated, preempt %d\n", + nr_allocated, preempt_count); + idr_checks(); + ida_checks(); + rcu_barrier(); + printv(2, "after idr_checks: %d allocated, preempt %d\n", nr_allocated, preempt_count); big_gang_check(long_run); rcu_barrier(); - printf("after big_gang_check: %d allocated, preempt %d\n", + printv(2, "after big_gang_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); for (i = 0; i < (long_run ? 
2000 : 3); i++) { copy_tag_check(); - printf("%d ", i); + printv(2, "%d ", i); fflush(stdout); } rcu_barrier(); - printf("after copy_tag_check: %d allocated, preempt %d\n", + printv(2, "after copy_tag_check: %d allocated, preempt %d\n", nr_allocated, preempt_count); } @@ -334,16 +345,20 @@ int main(int argc, char **argv) int opt; unsigned int seed = time(NULL); - while ((opt = getopt(argc, argv, "ls:")) != -1) { + while ((opt = getopt(argc, argv, "ls:v")) != -1) { if (opt == 'l') long_run = true; else if (opt == 's') seed = strtoul(optarg, NULL, 0); + else if (opt == 'v') + test_verbose++; } printf("random seed %u\n", seed); srand(seed); + printf("running tests\n"); + rcu_register_thread(); radix_tree_init(); @@ -360,9 +375,11 @@ int main(int argc, char **argv) benchmark(); rcu_barrier(); - printf("after rcu_barrier: %d allocated, preempt %d\n", + printv(2, "after rcu_barrier: %d allocated, preempt %d\n", nr_allocated, preempt_count); rcu_unregister_thread(); + printf("tests completed\n"); + exit(0); } diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index f79812a5e070..72d80f7059d3 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -30,7 +30,7 @@ static void __multiorder_tag_test(int index, int order) /* our canonical entry */ base = index & ~((1 << order) - 1); - printf("Multiorder tag test with index %d, canonical entry %d\n", + printv(2, "Multiorder tag test with index %d, canonical entry %d\n", index, base); err = item_insert_order(&tree, index, order); @@ -150,7 +150,7 @@ static void multiorder_check(unsigned long index, int order) struct item *item2 = item_create(min, order); RADIX_TREE(tree, GFP_KERNEL); - printf("Multiorder index %ld, order %d\n", index, order); + printv(2, "Multiorder index %ld, order %d\n", index, order); assert(item_insert_order(&tree, index, order) == 0); @@ -188,7 +188,7 @@ static void multiorder_shrink(unsigned long index, int order) RADIX_TREE(tree, GFP_KERNEL); struct radix_tree_node *node; - printf("Multiorder shrink index %ld, order %d\n", index, order); + printv(2, "Multiorder shrink index %ld, order %d\n", index, order); assert(item_insert_order(&tree, 0, order) == 0); @@ -209,7 +209,8 @@ static void multiorder_shrink(unsigned long index, int order) item_check_absent(&tree, i); if (!item_delete(&tree, 0)) { - printf("failed to delete index %ld (order %d)\n", index, order); abort(); + printv(2, "failed to delete index %ld (order %d)\n", index, order); + abort(); } for (i = 0; i < 2*max; i++) @@ -234,7 +235,7 @@ void multiorder_iteration(void) void **slot; int i, j, err; - printf("Multiorder iteration test\n"); + printv(1, "Multiorder iteration test\n"); #define NUM_ENTRIES 11 int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128}; @@ -275,7 +276,7 @@ void multiorder_tagged_iteration(void) void **slot; int i, j; - printf("Multiorder tagged iteration test\n"); + printv(1, "Multiorder tagged iteration test\n"); #define MT_NUM_ENTRIES 9 int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128}; @@ -453,7 +454,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) { struct radix_tree_preload *rtp = &radix_tree_preloads; if (rtp->nr != 0) - printf("split(%u %u) remaining %u\n", old_order, new_order, + printv(2, "split(%u %u) remaining %u\n", old_order, new_order, rtp->nr); /* * Can't check for equality here as some nodes may have been @@ -461,7 +462,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned 
alloc) * nodes allocated since they should have all been preloaded. */ if (nr_allocated > alloc) - printf("split(%u %u) allocated %u %u\n", old_order, new_order, + printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order, alloc, nr_allocated); } @@ -633,3 +634,10 @@ void multiorder_checks(void) radix_tree_cpu_dead(0); } + +int __weak main(void) +{ + radix_tree_init(); + multiorder_checks(); + return 0; +} diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index 0d6813a61b37..bf97742fc18c 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -193,7 +193,7 @@ void regression1_test(void) long arg; /* Regression #1 */ - printf("running regression test 1, should finish in under a minute\n"); + printv(1, "running regression test 1, should finish in under a minute\n"); nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); @@ -216,5 +216,5 @@ void regression1_test(void) free(threads); - printf("regression test 1, done\n"); + printv(1, "regression test 1, done\n"); } diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c index a41325d7a170..a24d1beec7c8 100644 --- a/tools/testing/radix-tree/regression2.c +++ b/tools/testing/radix-tree/regression2.c @@ -80,7 +80,7 @@ void regression2_test(void) unsigned long int start, end; struct page *pages[1]; - printf("running regression test 2 (should take milliseconds)\n"); + printv(1, "running regression test 2 (should take milliseconds)\n"); /* 0. */ for (i = 0; i <= max_slots - 1; i++) { p = page_alloc(); @@ -116,5 +116,5 @@ void regression2_test(void) /* We remove all the remained nodes */ radix_tree_delete(&mt_tree, max_slots); - printf("regression test 2, done\n"); + printv(1, "regression test 2, done\n"); } diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c index b594841fae85..670c3d2ae7b1 100644 --- a/tools/testing/radix-tree/regression3.c +++ b/tools/testing/radix-tree/regression3.c @@ -34,21 +34,21 @@ void regression3_test(void) void **slot; bool first; - printf("running regression test 3 (should take milliseconds)\n"); + printv(1, "running regression test 3 (should take milliseconds)\n"); radix_tree_insert(&root, 0, ptr0); radix_tree_tag_set(&root, 0, 0); first = true; radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { - printf("tagged %ld %p\n", iter.index, *slot); + printv(2, "tagged %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); radix_tree_tag_set(&root, 1, 0); first = false; } if (radix_tree_deref_retry(*slot)) { - printf("retry at %ld\n", iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } @@ -57,13 +57,13 @@ void regression3_test(void) first = true; radix_tree_for_each_slot(slot, &root, &iter, 0) { - printf("slot %ld %p\n", iter.index, *slot); + printv(2, "slot %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); first = false; } if (radix_tree_deref_retry(*slot)) { - printk("retry at %ld\n", iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } @@ -72,30 +72,30 @@ void regression3_test(void) first = true; radix_tree_for_each_contig(slot, &root, &iter, 0) { - printk("contig %ld %p\n", iter.index, *slot); + printv(2, "contig %ld %p\n", iter.index, *slot); if (first) { radix_tree_insert(&root, 1, ptr); first = false; } if (radix_tree_deref_retry(*slot)) { - printk("retry at %ld\n", 
iter.index); + printv(2, "retry at %ld\n", iter.index); slot = radix_tree_iter_retry(&iter); continue; } } radix_tree_for_each_slot(slot, &root, &iter, 0) { - printf("slot %ld %p\n", iter.index, *slot); + printv(2, "slot %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } radix_tree_for_each_contig(slot, &root, &iter, 0) { - printf("contig %ld %p\n", iter.index, *slot); + printv(2, "contig %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } @@ -103,9 +103,9 @@ void regression3_test(void) radix_tree_tag_set(&root, 0, 0); radix_tree_tag_set(&root, 1, 0); radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { - printf("tagged %ld %p\n", iter.index, *slot); + printv(2, "tagged %ld %p\n", iter.index, *slot); if (!iter.index) { - printf("next at %ld\n", iter.index); + printv(2, "next at %ld\n", iter.index); slot = radix_tree_iter_resume(slot, &iter); } } @@ -113,5 +113,5 @@ void regression3_test(void) radix_tree_delete(&root, 0); radix_tree_delete(&root, 1); - printf("regression test 3 passed\n"); + printv(1, "regression test 3 passed\n"); } diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c index fd98c132207a..d4ff00989245 100644 --- a/tools/testing/radix-tree/tag_check.c +++ b/tools/testing/radix-tree/tag_check.c @@ -49,10 +49,10 @@ void simple_checks(void) } verify_tag_consistency(&tree, 0); verify_tag_consistency(&tree, 1); - printf("before item_kill_tree: %d allocated\n", nr_allocated); + printv(2, "before item_kill_tree: %d allocated\n", nr_allocated); item_kill_tree(&tree); rcu_barrier(); - printf("after item_kill_tree: %d allocated\n", nr_allocated); + printv(2, "after item_kill_tree: %d allocated\n", nr_allocated); } /* @@ -257,7 +257,7 @@ static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag) gang_check(tree, thrash_state, tag); - printf("%d(%d) %d(%d) %d(%d) %d(%d) / " + printv(2, "%d(%d) %d(%d) %d(%d) %d(%d) / " "%d(%d) present, %d(%d) tagged\n", insert_chunk, nr_inserted, delete_chunk, nr_deleted, @@ -296,13 +296,13 @@ static void __leak_check(void) { RADIX_TREE(tree, GFP_KERNEL); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_insert(&tree, 1000000); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_delete(&tree, 1000000); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); item_kill_tree(&tree); - printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); + printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated); } static void single_check(void) @@ -336,15 +336,15 @@ void tag_check(void) extend_checks(); contract_checks(); rcu_barrier(); - printf("after extend_checks: %d allocated\n", nr_allocated); + printv(2, "after extend_checks: %d allocated\n", nr_allocated); __leak_check(); leak_check(); rcu_barrier(); - printf("after leak_check: %d allocated\n", nr_allocated); + printv(2, "after leak_check: %d allocated\n", nr_allocated); simple_checks(); rcu_barrier(); - printf("after simple_checks: %d allocated\n", nr_allocated); + printv(2, "after simple_checks: %d allocated\n", nr_allocated); thrash_tags(); rcu_barrier(); - printf("after thrash_tags: %d 
allocated\n", nr_allocated); + printv(2, "after thrash_tags: %d allocated\n", nr_allocated); } diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index 056a23b56467..b30e11d9d271 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -34,6 +34,8 @@ void tag_check(void); void multiorder_checks(void); void iteration_test(unsigned order, unsigned duration); void benchmark(void); +void idr_checks(void); +void ida_checks(void); struct item * item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); |