author	David Sterba <dsterba@suse.com>	2024-08-27 04:26:51 +0200
committer	David Sterba <dsterba@suse.com>	2024-09-10 16:51:20 +0200
commit	df2825e98507d10cb037a308087ecd7cb3f6688d
tree	31c3fc60e3e44b7f9f5941dfca85fd520c303096 /fs/btrfs/defrag.c
parent	11e3107d47cb266a284169f36c2293af3f397fdb
btrfs: always pass readahead state to defrag
The defrag ioctl passes the readahead state from the file, but autodefrag does not have a file, so until now the readahead state was allocated on demand. The autodefrag loop in the cleaner thread iterates over inodes, so we can simply provide an on-stack readahead state and no longer need to allocate one in btrfs_defrag_file(). The size is 32 bytes, which is acceptable.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
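For illustration, a minimal sketch of the pattern this patch applies (not the kernel code itself): the loop owns one on-stack struct file_ra_state and re-initializes it per inode via file_ra_state_init(), so the callee never has to kzalloc() it. The helper name process_one_inode() is hypothetical.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical stand-in for btrfs_run_defrag_inode(). */
static void process_one_inode(struct inode *inode, struct file_ra_state *ra)
{
	/* Reset the readahead window for this inode's mapping. */
	file_ra_state_init(ra, inode->i_mapping);
	/* ... readahead and defrag work driven by *ra ... */
}

static void defrag_all_sketch(struct inode **inodes, int nr)
{
	/* One ~32-byte readahead state on the stack, reused each iteration. */
	struct file_ra_state ra = { 0 };

	for (int i = 0; i < nr; i++)
		process_one_inode(inodes[i], &ra);
}

Compared to a kzalloc()/kfree() pair per call, this removes an allocation failure path and lets defrag_one_cluster() drop its "if (ra)" guard.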
Diffstat (limited to 'fs/btrfs/defrag.c')
-rw-r--r--	fs/btrfs/defrag.c	32
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 5b6bf0a59b23..acf1f39e45d0 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -219,7 +219,8 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 #define BTRFS_DEFRAG_BATCH	1024
 
 static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
-				  struct inode_defrag *defrag)
+				  struct inode_defrag *defrag,
+				  struct file_ra_state *ra)
 {
 	struct btrfs_root *inode_root;
 	struct inode *inode;
@@ -258,9 +259,10 @@ again:
 	range.len = (u64)-1;
 	range.start = cur;
 	range.extent_thresh = defrag->extent_thresh;
+	file_ra_state_init(ra, inode->i_mapping);
 
 	sb_start_write(fs_info->sb);
-	ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+	ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
 				BTRFS_DEFRAG_BATCH);
 	sb_end_write(fs_info->sb);
 	iput(inode);
@@ -287,6 +289,8 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 	atomic_inc(&fs_info->defrag_running);
 	while (1) {
+		struct file_ra_state ra = { 0 };
+
 		/* Pause the auto defragger. */
 		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
 			break;
 
@@ -309,7 +313,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 		first_ino = defrag->ino + 1;
 		root_objectid = defrag->root;
 
-		btrfs_run_defrag_inode(fs_info, defrag);
+		btrfs_run_defrag_inode(fs_info, defrag, &ra);
 	}
 
 	atomic_dec(&fs_info->defrag_running);
@@ -1302,8 +1306,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
 		if (entry->start + range_len <= *last_scanned_ret)
 			continue;
 
-		if (ra)
-			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+		page_cache_sync_readahead(inode->vfs_inode.i_mapping,
 				ra, NULL, entry->start >> PAGE_SHIFT,
 				((entry->start + range_len - 1) >> PAGE_SHIFT) -
 				(entry->start >> PAGE_SHIFT) + 1);
@@ -1335,7 +1338,7 @@ out:
  * Entry point to file defragmentation.
  *
  * @inode:	   inode to be defragged
- * @ra:		   readahead state (can be NULL)
+ * @ra:		   readahead state
  * @range:	   defrag options including range and flags
 * @newer_than:	   minimum transid to defrag
 * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
@@ -1357,12 +1360,13 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	u64 cur;
 	u64 last_byte;
 	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
-	bool ra_allocated = false;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	int ret = 0;
 	u32 extent_thresh = range->extent_thresh;
 	pgoff_t start_index;
 
+	ASSERT(ra);
+
 	if (isize == 0)
 		return 0;
@@ -1392,18 +1396,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
 
 	/*
-	 * If we were not given a ra, allocate a readahead context. As
-	 * readahead is just an optimization, defrag will work without it so
-	 * we don't error out.
-	 */
-	if (!ra) {
-		ra_allocated = true;
-		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
-		if (ra)
-			file_ra_state_init(ra, inode->i_mapping);
-	}
-
-	/*
 	 * Make writeback start from the beginning of the range, so that the
 	 * defrag range can be written sequentially.
 	 */
@@ -1457,8 +1449,6 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 		cond_resched();
 	}
 
-	if (ra_allocated)
-		kfree(ra);
 	/*
 	 * Update range.start for autodefrag, this will indicate where to start
 	 * in next run.
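With the fallback allocation removed, ASSERT(ra) makes a non-NULL readahead state part of btrfs_defrag_file()'s contract, so every caller must supply one: the ioctl path passes the file's own &file->f_ra, and autodefrag the on-stack state shown above. A hedged sketch of the post-patch calling convention (range setup abbreviated; a newer_than of 0 means no transid filtering):

	struct btrfs_defrag_range_args range = { 0 };
	struct file_ra_state ra = { 0 };
	int ret;

	range.len = (u64)-1;	/* defrag the whole file */
	file_ra_state_init(&ra, inode->i_mapping);
	ret = btrfs_defrag_file(inode, &ra, &range, 0, BTRFS_DEFRAG_BATCH);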