Btrfs: Wait for IO on the block device inodes of newly added devices
btrfs-vol -a /dev/xxx will zero the first and last two MB of the device. The kernel code needs to wait for this IO to finish before it adds the device.

btrfs metadata IO does not happen through the block device inode. A separate address space is used, allowing the zero filled buffer heads in the block device inode to be written to disk after FS metadata starts going down to the disk via the btrfs metadata inode.

The end result is zero filled metadata blocks after new devices are added to the filesystem.

The fix is a simple filemap_write_and_wait on the block device inode before actually inserting it into the pool of available devices.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 1a40e23b95
commit 8c8bee1d7c
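To make the commit message concrete before reading the diff, here is a minimal kernel-style sketch of the fix in isolation. Only the filemap_write_and_wait call is taken from the patch (see the btrfs_init_new_device hunk at the end); the helper name and its placement are invented purely for illustration.

#include <linux/fs.h>
#include <linux/blkdev.h>

/*
 * Illustrative helper (hypothetical name): flush a freshly added device's
 * block device inode before the device is inserted into the pool.
 */
static void flush_new_device_pages(struct block_device *bdev)
{
	/*
	 * btrfs-vol -a zeroed the first and last two MB of the device through
	 * bdev->bd_inode's page cache.  btrfs metadata IO later goes through a
	 * separate address space, so nothing orders these zero-filled pages
	 * against the metadata writes; write them out and wait for them here.
	 */
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
}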
@@ -610,6 +610,7 @@ struct btrfs_fs_info {
	struct list_head dead_roots;

	atomic_t nr_async_submits;
	atomic_t async_submit_draining;
	atomic_t nr_async_bios;
	atomic_t tree_log_writers;
	atomic_t tree_log_commit;
@@ -460,6 +460,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;

	while(atomic_read(&fs_info->async_submit_draining) &&
	      atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -495,11 +502,8 @@ static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	int ret;

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
@@ -1360,6 +1364,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->throttles, 0);
	atomic_set(&fs_info->throttle_gen, 0);
@@ -3440,13 +3440,24 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
		list_del_init(&binode->delalloc_inodes);
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
		if (inode) {
			filemap_write_and_wait(inode->i_mapping);
			filemap_flush(inode->i_mapping);
			iput(inode);
		}
		cond_resched();
		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
	}
	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while(atomic_read(&root->fs_info->nr_async_submits)) {
		wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
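Taken together with the btrfs_wq_submit_bio hunk earlier, this hunk forms a simple drain handshake: submitters back off while async_submit_draining is set and work is still queued, and the drainer waits until nr_async_submits reaches zero. The following is a rough userspace analogue in C using pthreads, purely illustrative; all names are made up, and the kernel code uses atomics and a wait queue rather than a mutex and condition variable.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
static int nr_in_flight;   /* plays the role of nr_async_submits */
static int draining;       /* plays the role of async_submit_draining */

/* Submit side, like btrfs_wq_submit_bio: do not pile on new work
 * while a drain is in progress and work is still in flight. */
static void submit_work(void)
{
	pthread_mutex_lock(&lock);
	while (draining && nr_in_flight)
		pthread_cond_wait(&drained, &lock);
	nr_in_flight++;
	pthread_mutex_unlock(&lock);
	/* ... hand the work off to a worker thread here ... */
}

/* Worker side: called when one piece of queued work has been started. */
static void complete_work(void)
{
	pthread_mutex_lock(&lock);
	if (--nr_in_flight == 0)
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

/* Drain side, like btrfs_start_delalloc_inodes: flag the drain and
 * wait until everything already queued has actually been started. */
static void drain_all(void)
{
	pthread_mutex_lock(&lock);
	draining = 1;
	while (nr_in_flight)
		pthread_cond_wait(&drained, &lock);
	draining = 0;
	pthread_mutex_unlock(&lock);
}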
@@ -1038,6 +1038,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
		return -EIO;
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	trans = btrfs_start_transaction(root, 1);