author     Qu Wenruo <wqu@suse.com>          2023-06-12 15:23:29 +0800
committer  David Sterba <dsterba@suse.com>   2023-06-19 13:59:40 +0200
commit     81db6ae842b3c07edd67278e3693f53a28d694cf (patch)
tree       d4f794b7f10970a60828c5de16a18a3cafadee28 /fs/btrfs/scrub.c
parent     c2bbc0bab0bb3cdd38914fe714f9b6c3f7544e88 (diff)
btrfs: scrub: remove btrfs_fs_info::scrub_wr_completion_workers
Since the scrub rework introduced by commit 2af2aaf98205 ("btrfs: scrub: introduce structure for new BTRFS_STRIPE_LEN based interface") and later commits, scrub only needs a single workqueue, fs_info::scrub_workers.

The scrub_wr_completion_workers workqueue was originally added to handle the delayed work after write bios finished. But the new scrub code goes submit-and-wait for write bios, so all of that work is now done inside the scrub_workers workqueue.

The last user of fs_info::scrub_wr_completion_workers was removed in commit 16f93993498b ("btrfs: scrub: remove the old writeback infrastructure"), so we can safely remove the workqueue.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
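As a reading aid for the "submit-and-wait" point above, here is a minimal kernel-style sketch (not code from the patch: struct scrub_stripe_sketch, its repair_bio field and the function names are hypothetical; only the workqueue and bio APIs are real). Because the repair write is submitted and waited for inside the scrub worker itself, there is no end_io-driven completion left for a second workqueue to handle.

#include <linux/workqueue.h>
#include <linux/bio.h>

/* Hypothetical sketch, not from fs/btrfs/scrub.c. */
struct scrub_stripe_sketch {
        struct work_struct work;
        struct bio *repair_bio;         /* assumed prepared by the caller */
};

static void scrub_stripe_worker(struct work_struct *work)
{
        struct scrub_stripe_sketch *stripe =
                container_of(work, struct scrub_stripe_sketch, work);

        /*
         * Submit the repair write and wait for it right here.  The old code
         * instead bounced completion handling to scrub_wr_completion_workers
         * from the bio end_io callback.
         */
        if (submit_bio_wait(stripe->repair_bio))
                pr_warn("scrub sketch: repair write failed\n");
}

/* One freezable, unbound workqueue covers reading, verifying and writing. */
static struct workqueue_struct *scrub_sketch_wq;

static int scrub_sketch_init(void)
{
        /* Same flags the real scrub workqueue uses. */
        scrub_sketch_wq = alloc_workqueue("btrfs-scrub-sketch",
                                          WQ_FREEZABLE | WQ_UNBOUND, 0);
        return scrub_sketch_wq ? 0 : -ENOMEM;
}

static void scrub_sketch_queue(struct scrub_stripe_sketch *stripe)
{
        INIT_WORK(&stripe->work, scrub_stripe_worker);
        queue_work(scrub_sketch_wq, &stripe->work);
}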
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--   fs/btrfs/scrub.c   19
1 file changed, 2 insertions, 17 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 297beaefd49c..2c7fdbb60314 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2698,17 +2698,12 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
         if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
                                         &fs_info->scrub_lock)) {
                 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
-                struct workqueue_struct *scrub_wr_comp =
-                                fs_info->scrub_wr_completion_workers;
 
                 fs_info->scrub_workers = NULL;
-                fs_info->scrub_wr_completion_workers = NULL;
                 mutex_unlock(&fs_info->scrub_lock);
 
                 if (scrub_workers)
                         destroy_workqueue(scrub_workers);
-                if (scrub_wr_comp)
-                        destroy_workqueue(scrub_wr_comp);
         }
 }
 
@@ -2719,7 +2714,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                 int is_dev_replace)
 {
         struct workqueue_struct *scrub_workers = NULL;
-        struct workqueue_struct *scrub_wr_comp = NULL;
         unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
         int max_active = fs_info->thread_pool_size;
         int ret = -ENOMEM;
@@ -2732,18 +2726,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
         else
                 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
         if (!scrub_workers)
-                goto fail_scrub_workers;
-
-        scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
-        if (!scrub_wr_comp)
-                goto fail_scrub_wr_completion_workers;
+                return -ENOMEM;
 
         mutex_lock(&fs_info->scrub_lock);
         if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
-                ASSERT(fs_info->scrub_workers == NULL &&
-                       fs_info->scrub_wr_completion_workers == NULL);
+                ASSERT(fs_info->scrub_workers == NULL);
                 fs_info->scrub_workers = scrub_workers;
-                fs_info->scrub_wr_completion_workers = scrub_wr_comp;
                 refcount_set(&fs_info->scrub_workers_refcnt, 1);
                 mutex_unlock(&fs_info->scrub_lock);
                 return 0;
@@ -2754,10 +2742,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 
         ret = 0;
 
-        destroy_workqueue(scrub_wr_comp);
-fail_scrub_wr_completion_workers:
         destroy_workqueue(scrub_workers);
-fail_scrub_workers:
         return ret;
 }
 
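After the patch, scrub_workers_get() keeps the lifecycle pattern visible in the hunks above: allocate the workqueue without scrub_lock held, then under the lock either install it as the first user or, if another thread raced ahead, keep that thread's workqueue and destroy the now-redundant allocation. A generic, hypothetical sketch of that get/put shape (the my_workers names are made up; refcount_*(), the mutex helpers and the workqueue calls are real kernel APIs):

#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

/* Illustrative only -- not the btrfs code, just the same get/put shape. */
static struct workqueue_struct *my_workers;
static refcount_t my_workers_refcnt = REFCOUNT_INIT(0);
static DEFINE_MUTEX(my_lock);

static int my_workers_get(void)
{
        struct workqueue_struct *wq;

        /* Allocate before taking the lock; alloc_workqueue() may sleep. */
        wq = alloc_workqueue("my-workers", WQ_FREEZABLE | WQ_UNBOUND, 0);
        if (!wq)
                return -ENOMEM;

        mutex_lock(&my_lock);
        if (refcount_read(&my_workers_refcnt) == 0) {
                my_workers = wq;                /* first user installs it */
                refcount_set(&my_workers_refcnt, 1);
                mutex_unlock(&my_lock);
                return 0;
        }
        refcount_inc(&my_workers_refcnt);       /* lost the race */
        mutex_unlock(&my_lock);
        destroy_workqueue(wq);                  /* drop the spare allocation */
        return 0;
}

static void my_workers_put(void)
{
        if (refcount_dec_and_mutex_lock(&my_workers_refcnt, &my_lock)) {
                struct workqueue_struct *wq = my_workers;

                my_workers = NULL;
                mutex_unlock(&my_lock);
                if (wq)
                        destroy_workqueue(wq);
        }
}

Allocating outside the critical section keeps a potentially sleeping allocation away from the lock, at the cost of occasionally throwing one workqueue away when two threads race to create the workers.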