author      Josef Bacik <josef@toxicpanda.com>    2019-06-20 15:37:45 -0400
committer   David Sterba <dsterba@suse.com>       2019-09-09 14:59:04 +0200
commit      2e405ad842546a1a37aaa586d5140d071cb1f802
tree        42299ec8e870e4953d7b5b6a498d61ed652f3d33
parent      aac0023c2106952538414254960c51dcf0dc39e9
btrfs: migrate the block group lookup code
Move these bits first as they are the easiest to move. Export two of
the helpers so they can be moved all at once.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
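[ Note: the two exported lookup helpers pair with btrfs_next_block_group() to
  let callers outside block-group.c walk the block groups with the reference
  counts kept balanced, which is the pattern the btrfs_trim_fs() hunk below
  switches to. The following is only an illustrative sketch of that caller
  pattern; walk_block_groups() is a made-up name and is not part of this
  patch. ]

/*
 * Illustrative only (not part of this patch): mirrors the iteration pattern
 * btrfs_trim_fs() uses via the now-exported helpers.
 */
#include "ctree.h"
#include "block-group.h"

static void walk_block_groups(struct btrfs_fs_info *fs_info, u64 start)
{
        struct btrfs_block_group_cache *cache;

        /* Returns the block group at or after @start with a reference held. */
        cache = btrfs_lookup_first_block_group(fs_info, start);

        /*
         * btrfs_next_block_group() drops the reference on the block group it
         * was given and returns the next one with a fresh reference, so the
         * loop holds at most one reference at a time.
         */
        for (; cache; cache = btrfs_next_block_group(cache)) {
                /* ... inspect cache->key.objectid and cache->key.offset ... */
        }

        /*
         * If the loop is exited early instead, btrfs_put_block_group() must
         * be called on the current entry, as btrfs_trim_fs() does on break.
         */
}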
-rw-r--r--   fs/btrfs/Makefile       |  2
-rw-r--r--   fs/btrfs/block-group.c  | 95
-rw-r--r--   fs/btrfs/block-group.h  |  7
-rw-r--r--   fs/btrfs/ctree.h        |  3
-rw-r--r--   fs/btrfs/extent-tree.c  | 96
5 files changed, 105 insertions(+), 98 deletions(-)
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 76a843198bcb..82200dbca5ac 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
 	   uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
-	   block-rsv.o delalloc-space.o
+	   block-rsv.o delalloc-space.o block-group.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
new file mode 100644
index 000000000000..ebe7b1c5c1e3
--- /dev/null
+++ b/fs/btrfs/block-group.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "ctree.h"
+#include "block-group.h"
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *block_group_cache_tree_search(
+		struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+	struct btrfs_block_group_cache *cache, *ret = NULL;
+	struct rb_node *n;
+	u64 end, start;
+
+	spin_lock(&info->block_group_cache_lock);
+	n = info->block_group_cache_tree.rb_node;
+
+	while (n) {
+		cache = rb_entry(n, struct btrfs_block_group_cache,
+				 cache_node);
+		end = cache->key.objectid + cache->key.offset - 1;
+		start = cache->key.objectid;
+
+		if (bytenr < start) {
+			if (!contains && (!ret || start < ret->key.objectid))
+				ret = cache;
+			n = n->rb_left;
+		} else if (bytenr > start) {
+			if (contains && bytenr <= end) {
+				ret = cache;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			ret = cache;
+			break;
+		}
+	}
+	if (ret) {
+		btrfs_get_block_group(ret);
+		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+			info->first_logical_byte = ret->key.objectid;
+	}
+	spin_unlock(&info->block_group_cache_lock);
+
+	return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 1);
+}
+
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct rb_node *node;
+
+	spin_lock(&fs_info->block_group_cache_lock);
+
+	/* If our block group was removed, we need a full search. */
+	if (RB_EMPTY_NODE(&cache->cache_node)) {
+		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+		spin_unlock(&fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+		return cache;
+	}
+	node = rb_next(&cache->cache_node);
+	btrfs_put_block_group(cache);
+	if (node) {
+		cache = rb_entry(node, struct btrfs_block_group_cache,
+				 cache_node);
+		btrfs_get_block_group(cache);
+	} else
+		cache = NULL;
+	spin_unlock(&fs_info->block_group_cache_lock);
+	return cache;
+}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 054745007519..87bac0d5ad69 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -151,4 +151,11 @@ static inline int btrfs_should_fragment_free_space(
 }
 #endif
 
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e95fdd1d9dd2..49ac72c3d0cd 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2496,9 +2496,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f28697131f22..a454945227ca 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -133,52 +133,6 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	return 0;
 }
 
-/*
- * This will return the block group at or after bytenr if contains is 0, else
- * it will return the block group that contains the bytenr
- */
-static struct btrfs_block_group_cache *
-block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
-			      int contains)
-{
-	struct btrfs_block_group_cache *cache, *ret = NULL;
-	struct rb_node *n;
-	u64 end, start;
-
-	spin_lock(&info->block_group_cache_lock);
-	n = info->block_group_cache_tree.rb_node;
-
-	while (n) {
-		cache = rb_entry(n, struct btrfs_block_group_cache,
-				 cache_node);
-		end = cache->key.objectid + cache->key.offset - 1;
-		start = cache->key.objectid;
-
-		if (bytenr < start) {
-			if (!contains && (!ret || start < ret->key.objectid))
-				ret = cache;
-			n = n->rb_left;
-		} else if (bytenr > start) {
-			if (contains && bytenr <= end) {
-				ret = cache;
-				break;
-			}
-			n = n->rb_right;
-		} else {
-			ret = cache;
-			break;
-		}
-	}
-	if (ret) {
-		btrfs_get_block_group(ret);
-		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
-			info->first_logical_byte = ret->key.objectid;
-	}
-	spin_unlock(&info->block_group_cache_lock);
-
-	return ret;
-}
-
 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
 			       u64 start, u64 num_bytes)
 {
@@ -673,24 +627,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	return ret;
 }
 
-/*
- * return the block group that starts at or after bytenr
- */
-static struct btrfs_block_group_cache *
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 0);
-}
-
-/*
- * return the block group that contains the given bytenr
- */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 1);
-}
-
 static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
 {
@@ -3146,34 +3082,6 @@ fail:
 
 }
 
-static struct btrfs_block_group_cache *next_block_group(
-		struct btrfs_block_group_cache *cache)
-{
-	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct rb_node *node;
-
-	spin_lock(&fs_info->block_group_cache_lock);
-
-	/* If our block group was removed, we need a full search. */
-	if (RB_EMPTY_NODE(&cache->cache_node)) {
-		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
-
-		spin_unlock(&fs_info->block_group_cache_lock);
-		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
-		return cache;
-	}
-	node = rb_next(&cache->cache_node);
-	btrfs_put_block_group(cache);
-	if (node) {
-		cache = rb_entry(node, struct btrfs_block_group_cache,
-				 cache_node);
-		btrfs_get_block_group(cache);
-	} else
-		cache = NULL;
-	spin_unlock(&fs_info->block_group_cache_lock);
-	return cache;
-}
-
 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_trans_handle *trans,
 			    struct btrfs_path *path)
@@ -7651,7 +7559,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		if (block_group->iref)
 			break;
 		spin_unlock(&block_group->lock);
-		block_group = next_block_group(block_group);
+		block_group = btrfs_next_block_group(block_group);
 	}
 	if (!block_group) {
 		if (last == 0)
@@ -8872,7 +8780,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 		return -EINVAL;
 
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
-	for (; cache; cache = next_block_group(cache)) {
+	for (; cache; cache = btrfs_next_block_group(cache)) {
 		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;