Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "Filipe hit two problems in my block group cache patches. We
  finalized the fixes last week and ran through more tests"

* 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: prevent list corruption during free space cache processing
  Btrfs: fix inode cache writeout
commit f583381f50
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1119,10 +1119,7 @@ static int flush_dirty_cache(struct inode *inode)
 }
 
 static void noinline_for_stack
-cleanup_write_cache_enospc(struct inode *inode,
-			   struct btrfs_io_ctl *io_ctl,
-			   struct extent_state **cached_state,
-			   struct list_head *bitmap_list)
+cleanup_bitmap_list(struct list_head *bitmap_list)
 {
 	struct list_head *pos, *n;
 
@@ -1131,6 +1128,14 @@ cleanup_write_cache_enospc(struct inode *inode,
 			list_entry(pos, struct btrfs_free_space, list);
 		list_del_init(&entry->list);
 	}
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+			   struct btrfs_io_ctl *io_ctl,
+			   struct extent_state **cached_state,
+			   struct list_head *bitmap_list)
+{
 	io_ctl_drop_pages(io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, cached_state,
@@ -1149,7 +1154,8 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
 	if (!inode)
 		return 0;
 
-	root = root->fs_info->tree_root;
+	if (block_group)
+		root = root->fs_info->tree_root;
 
 	/* Flush the dirty pages in the cache file. */
 	ret = flush_dirty_cache(inode);
@@ -1265,11 +1271,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	ret = write_cache_extent_entries(io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
-	spin_unlock(&ctl->tree_lock);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * Some spaces that are freed in the current transaction are pinned,
@@ -1280,17 +1283,14 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * the dirty list and redo it. No locking needed
 	 */
 	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
 	 * locked while doing it because a concurrent trim can be manipulating
 	 * or freeing the bitmap.
 	 */
-	spin_lock(&ctl->tree_lock);
 	ret = write_bitmap_entries(io_ctl, &bitmap_list);
 	spin_unlock(&ctl->tree_lock);
 	mutex_unlock(&ctl->cache_writeout_mutex);
@@ -1343,6 +1343,11 @@ out:
 	iput(inode);
 	return ret;
 
+out_nospc_locked:
+	cleanup_bitmap_list(&bitmap_list);
+	spin_unlock(&ctl->tree_lock);
+	mutex_unlock(&ctl->cache_writeout_mutex);
+
 out_nospc:
 	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
 
@@ -3463,9 +3468,12 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
+	memset(&io_ctl, 0, sizeof(io_ctl));
 	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
-				      trans, path, 0) ||
-		btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+				      trans, path, 0);
+	if (!ret)
+		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
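
Note on the list-corruption fix: the patch splits the old cleanup_write_cache_enospc() helper and adds an out_nospc_locked error path so the bitmap list is emptied while ctl->tree_lock is still held, which keeps a concurrent trim from manipulating entries that are being unlinked. For illustration only, here is a minimal userspace sketch (not part of the commit) of the safe iterate-and-unlink pattern that the new cleanup_bitmap_list() helper uses; the list_head, list_del_init() and container_of() definitions are simplified stand-ins for the kernel's <linux/list.h> and list_for_each_safe(), and struct free_space is a made-up placeholder for struct btrfs_free_space.

/*
 * Minimal userspace sketch (not kernel code): the iterate-and-unlink
 * pattern that the new cleanup_bitmap_list() helper relies on. A second
 * cursor (n) is saved before each step so the current node can be
 * unlinked without breaking the walk.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry;		/* leave the node self-linked, */
	entry->prev = entry;		/* as the kernel helper does   */
}

struct free_space {			/* placeholder for struct btrfs_free_space */
	int id;
	struct list_head list;
};

/* Same shape as the patch's cleanup_bitmap_list(): unlink every entry. */
static void cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	/* open-coded list_for_each_safe(pos, n, bitmap_list) */
	for (pos = bitmap_list->next, n = pos->next; pos != bitmap_list;
	     pos = n, n = pos->next) {
		struct free_space *entry =
			container_of(pos, struct free_space, list);

		printf("unlinking entry %d\n", entry->id);
		list_del_init(&entry->list);
	}
}

int main(void)
{
	struct list_head bitmap_list = LIST_HEAD_INIT(bitmap_list);
	struct free_space e[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&e[i].list, &bitmap_list);

	cleanup_bitmap_list(&bitmap_list);

	/* exit 0 only if the walk really emptied the list */
	return bitmap_list.next == &bitmap_list ? 0 : 1;
}

Compiled as plain C99, the sketch prints each unlinked entry and returns 0 once the list head points back to itself; in the actual patch the same walk runs under ctl->tree_lock before the out_nospc_locked path drops the lock.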