author    David Sterba <dsterba@suse.com>    2024-08-27 04:05:48 +0200
committer David Sterba <dsterba@suse.com>    2024-09-10 16:51:19 +0200
commit    276940915f232f8569124811fd8a9524f27f5748
tree      3ed84fee9e666e802ba93c11f22847f046bb5d4b /fs/btrfs/defrag.c
parent    ffc531652d1039b2c5049a58814d74352f684837
btrfs: clear defragmented inodes using postorder in btrfs_cleanup_defrag_inodes()
btrfs_cleanup_defrag_inodes() is not called frequently, only on remount or unmount, but the way it frees the inodes in fs_info->defrag_inodes is inefficient. Each iteration has to locate the first node, remove it, and potentially rebalance the tree, until the tree is empty. The upside of that approach is that the tree stays consistent between iterations, which allows a conditional reschedule.

For cleanups the rbtree_postorder_for_each_entry_safe() iterator is convenient, but we can't reschedule and restart the iteration, because some of the tree nodes would already be freed. The cleanup operation is kmem_cache_free(), which will likely take the fast path for most objects, so rescheduling should not be necessary.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
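As an illustration of the pattern described above, here is a minimal sketch of a postorder cleanup helper. The names struct my_item, my_item_cachep and my_cleanup_all are hypothetical, not part of btrfs; only rbtree_postorder_for_each_entry_safe(), kmem_cache_free(), RB_ROOT and the locking calls are real kernel APIs.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical rbtree entry type, for illustration only. */
struct my_item {
	struct rb_node rb_node;
	u64 key;
};

static struct kmem_cache *my_item_cachep;

static void my_cleanup_all(struct rb_root *root, spinlock_t *lock)
{
	struct my_item *item, *next;

	spin_lock(lock);
	/*
	 * Postorder visits children before their parent, and the _safe
	 * variant caches the next node up front, so each entry may be
	 * freed as it is visited. The walk cannot be paused and
	 * restarted, though: already-visited nodes no longer exist.
	 */
	rbtree_postorder_for_each_entry_safe(item, next, root, rb_node)
		kmem_cache_free(my_item_cachep, item);
	/* The tree now holds dangling pointers; reset it before reuse. */
	*root = RB_ROOT;
	spin_unlock(lock);
}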
Diffstat (limited to 'fs/btrfs/defrag.c')
-rw-r--r--    fs/btrfs/defrag.c    14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 41d67065d02b..89f51252d25c 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -212,20 +212,14 @@ out:
 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 {
-	struct inode_defrag *defrag;
-	struct rb_node *node;
+	struct inode_defrag *defrag, *next;
 
 	spin_lock(&fs_info->defrag_inodes_lock);
-	node = rb_first(&fs_info->defrag_inodes);
-	while (node) {
-		rb_erase(node, &fs_info->defrag_inodes);
-		defrag = rb_entry(node, struct inode_defrag, rb_node);
-		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-		cond_resched_lock(&fs_info->defrag_inodes_lock);
+	rbtree_postorder_for_each_entry_safe(defrag, next,
+					     &fs_info->defrag_inodes, rb_node)
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 
-		node = rb_first(&fs_info->defrag_inodes);
-	}
 	spin_unlock(&fs_info->defrag_inodes_lock);
 }
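For contrast, here is a sketch of the erase-one-at-a-time pattern the patch removes, reusing the hypothetical my_item type and my_item_cachep from the sketch above (cond_resched_lock() comes from <linux/sched.h>). Because the tree remains consistent after each rb_erase(), the walk can drop the lock and reschedule mid-way, at the cost of an O(log n) rb_first() lookup and a potential rebalance per node.

static void my_cleanup_resched(struct rb_root *root, spinlock_t *lock)
{
	struct rb_node *node;

	spin_lock(lock);
	while ((node = rb_first(root))) {
		struct my_item *item = rb_entry(node, struct my_item, rb_node);

		rb_erase(node, root);
		kmem_cache_free(my_item_cachep, item);
		/*
		 * The tree is consistent here, so dropping the lock and
		 * rescheduling is safe; the walk resumes via rb_first().
		 */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}

The commit trades this reschedule point for a single postorder pass, on the grounds that freeing to a kmem cache is fast enough that yielding the CPU during the loop is unnecessary.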