btrfs: scrub: remove btrfs_fs_info::scrub_wr_completion_workers
Since the scrub rework introduced by commit 2af2aaf982 ("btrfs: scrub:
introduce structure for new BTRFS_STRIPE_LEN based interface") and later
commits, scrub only needs one single workqueue, fs_info::scrub_worker.

That scrub_wr_completion_workers was originally there to handle the
delayed work after write bios finished.  But the new scrub code submits
and waits for write bios, so all the work is done inside the
scrub_worker.

The last user of fs_info::scrub_wr_completion_workers was removed in
commit 16f9399349 ("btrfs: scrub: remove the old writeback
infrastructure"), so we can safely remove the workqueue.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c2bbc0bab0
commit 81db6ae842
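For context, the submit-and-wait model mentioned in the commit message is
what removes the need for a second workqueue: the scrub worker submits the
write bio and blocks until its end_io callback signals completion, so
nothing has to be deferred to scrub_wr_completion_workers. The snippet
below is only a minimal sketch of that pattern; the function names
(scrub_write_and_wait_sketch, scrub_write_endio_sketch) are invented for
illustration and are not the actual scrub code.

#include <linux/bio.h>
#include <linux/completion.h>

/* Hypothetical end_io: just wake the waiter, no deferred work is queued. */
static void scrub_write_endio_sketch(struct bio *bio)
{
	complete(bio->bi_private);
}

/* Hypothetical write path: submit the bio and wait for it in place. */
static void scrub_write_and_wait_sketch(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_private = &done;
	bio->bi_end_io = scrub_write_endio_sketch;
	submit_bio(bio);
	wait_for_completion_io(&done);	/* all work stays in the scrub worker */
}

Because the wait happens synchronously in the scrub worker, the diff below
can drop both the scrub_wr_completion_workers member and its allocation and
teardown.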
@@ -642,7 +642,6 @@ struct btrfs_fs_info {
 	 */
 	refcount_t scrub_workers_refcnt;
 	struct workqueue_struct *scrub_workers;
-	struct workqueue_struct *scrub_wr_completion_workers;
 	struct btrfs_subpage_info *subpage_info;
 
 	struct btrfs_discard_ctl discard_ctl;
@@ -2698,17 +2698,12 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
 	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
 					&fs_info->scrub_lock)) {
 		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
-		struct workqueue_struct *scrub_wr_comp =
-					fs_info->scrub_wr_completion_workers;
 
 		fs_info->scrub_workers = NULL;
-		fs_info->scrub_wr_completion_workers = NULL;
 		mutex_unlock(&fs_info->scrub_lock);
 
 		if (scrub_workers)
 			destroy_workqueue(scrub_workers);
-		if (scrub_wr_comp)
-			destroy_workqueue(scrub_wr_comp);
 	}
 }
 
@@ -2719,7 +2714,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 					      int is_dev_replace)
 {
 	struct workqueue_struct *scrub_workers = NULL;
-	struct workqueue_struct *scrub_wr_comp = NULL;
 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
 	int max_active = fs_info->thread_pool_size;
 	int ret = -ENOMEM;
@@ -2732,18 +2726,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	else
 		scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
 	if (!scrub_workers)
-		goto fail_scrub_workers;
-
-	scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
-	if (!scrub_wr_comp)
-		goto fail_scrub_wr_completion_workers;
+		return -ENOMEM;
 
 	mutex_lock(&fs_info->scrub_lock);
 	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
-		ASSERT(fs_info->scrub_workers == NULL &&
-		       fs_info->scrub_wr_completion_workers == NULL);
+		ASSERT(fs_info->scrub_workers == NULL);
 		fs_info->scrub_workers = scrub_workers;
-		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
 		refcount_set(&fs_info->scrub_workers_refcnt, 1);
 		mutex_unlock(&fs_info->scrub_lock);
 		return 0;
@@ -2754,10 +2742,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 
 	ret = 0;
 
-	destroy_workqueue(scrub_wr_comp);
-fail_scrub_wr_completion_workers:
 	destroy_workqueue(scrub_workers);
-fail_scrub_workers:
 	return ret;
 }
 