Merge branch 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "A couple of small fixes"

* 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: check prepare_uptodate_page() error code earlier
  Btrfs: check for empty bitmap list in setup_cluster_bitmaps
  btrfs: fix misleading warning when space cache failed to load
  Btrfs: fix transaction handle leak in balance
  Btrfs: fix unprotected list move from unused_bgs to deleted_bgs list
Linus Torvalds 2015-12-18 15:35:08 -08:00
commit fc315e3e5c
6 changed files with 29 additions and 15 deletions

fs/btrfs/extent-tree.c

@@ -10480,11 +10480,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
-			WARN_ON(!list_empty(&block_group->bg_list));
-			spin_lock(&trans->transaction->deleted_bgs_lock);
+			spin_lock(&fs_info->unused_bgs_lock);
+			/*
+			 * A concurrent scrub might have added us to the list
+			 * fs_info->unused_bgs, so use a list_move operation
+			 * to add the block group to the deleted_bgs list.
+			 */
 			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
-			spin_unlock(&trans->transaction->deleted_bgs_lock);
+			spin_unlock(&fs_info->unused_bgs_lock);
 			btrfs_get_block_group(block_group);
 		}
 end_trans:
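
The race this hunk closes is between scrub re-adding a block group to fs_info->unused_bgs and the cleaner moving it onto the transaction's deleted_bgs list; both sides have to serialize on the same lock, which is why the per-transaction deleted_bgs_lock goes away. Below is a minimal userspace sketch of that locking rule, not the kernel code: the list helpers, scrub_mark_unused()/delete_unused_bg() and the plain pthread mutex are illustrative stand-ins for the btrfs structures.

#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_move(struct list_head *n, struct list_head *h)
{
	n->prev->next = n->next; n->next->prev = n->prev;	/* unlink */
	list_add_tail(n, h);					/* re-add on target list */
}

static pthread_mutex_t unused_bgs_lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head unused_bgs, deleted_bgs;
static struct list_head bg_list;	/* one block group's list hook */

/* Scrub side: may put the block group (back) on unused_bgs. */
static void scrub_mark_unused(void)
{
	pthread_mutex_lock(&unused_bgs_lock);
	list_add_tail(&bg_list, &unused_bgs);
	pthread_mutex_unlock(&unused_bgs_lock);
}

/* Deletion side: takes the same lock, so the move cannot race the add. */
static void delete_unused_bg(void)
{
	pthread_mutex_lock(&unused_bgs_lock);
	list_move(&bg_list, &deleted_bgs);
	pthread_mutex_unlock(&unused_bgs_lock);
}

int main(void)
{
	list_init(&unused_bgs);
	list_init(&deleted_bgs);
	list_init(&bg_list);
	scrub_mark_unused();
	delete_unused_bg();
	printf("on deleted_bgs: %d\n", deleted_bgs.next == &bg_list);
	return 0;
}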

fs/btrfs/file.c

@@ -1291,7 +1291,8 @@ out:
  * on error we return an unlocked page and the error value
  * on success we return a locked page and 0
  */
-static int prepare_uptodate_page(struct page *page, u64 pos,
+static int prepare_uptodate_page(struct inode *inode,
+				 struct page *page, u64 pos,
 				 bool force_uptodate)
 {
 	int ret = 0;
@@ -1306,6 +1307,10 @@ static int prepare_uptodate_page(struct page *page, u64 pos,
 			unlock_page(page);
 			return -EIO;
 		}
+		if (page->mapping != inode->i_mapping) {
+			unlock_page(page);
+			return -EAGAIN;
+		}
 	}
 	return 0;
 }
@@ -1324,6 +1329,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 	int faili;
 
 	for (i = 0; i < num_pages; i++) {
+again:
 		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
 		if (!pages[i]) {
@@ -1333,13 +1339,17 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 		}
 
 		if (i == 0)
-			err = prepare_uptodate_page(pages[i], pos,
+			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
-		if (i == num_pages - 1)
-			err = prepare_uptodate_page(pages[i],
+		if (!err && i == num_pages - 1)
+			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
 		if (err) {
 			page_cache_release(pages[i]);
+			if (err == -EAGAIN) {
+				err = 0;
+				goto again;
+			}
 			faili = i - 1;
 			goto fail;
 		}
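
The new -EAGAIN path covers a page whose mapping changed (for instance, it was truncated and recycled) between the lookup and the lock; prepare_pages() simply drops it and retries from again:. A small userspace sketch of that acquire/validate/retry shape, using hypothetical stand-ins (prepare_page(), struct mapping) rather than the real kernel types:

#include <errno.h>
#include <stdio.h>

struct mapping { int id; };
struct page { struct mapping *mapping; };

/* Stand-in for the validation added to prepare_uptodate_page(). */
static int prepare_page(struct page *page, struct mapping *expected)
{
	if (page->mapping != expected)
		return -EAGAIN;	/* page no longer belongs to this file */
	return 0;
}

int main(void)
{
	struct mapping file_mapping = { 1 }, other = { 2 };
	struct page page = { &other };	/* simulate a recycled page */
	int err, tries = 0;

again:
	tries++;
	err = prepare_page(&page, &file_mapping);
	if (err == -EAGAIN) {
		page.mapping = &file_mapping;	/* simulate re-finding the page */
		err = 0;
		goto again;
	}
	printf("tries=%d err=%d\n", tries, err);	/* tries=2 err=0 */
	return 0;
}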

fs/btrfs/free-space-cache.c

@@ -891,7 +891,7 @@ out:
 		spin_unlock(&block_group->lock);
 		ret = 0;
 
-		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
 	}
 
@@ -2972,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry;
+	struct btrfs_free_space *entry = NULL;
 	int ret = -ENOSPC;
 	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 
@@ -2983,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	 * The bitmap that covers offset won't be in the list unless offset
 	 * is just its start offset.
 	 */
-	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
-	if (entry->offset != bitmap_offset) {
+	if (!list_empty(bitmaps))
+		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+
+	if (!entry || entry->offset != bitmap_offset) {
 		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
 		if (entry && list_empty(&entry->list))
 			list_add(&entry->list, bitmaps);
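
The failure mode here is calling list_first_entry() on an empty list: with nothing on it, the "entry" is computed from the list head itself and points at garbage. A minimal userspace sketch of the guarded pattern, using a cut-down copy of kernel-style list macros (free_space and the bitmaps list are illustrative, not the btrfs structures):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct free_space { unsigned long long offset; struct list_head list; };

int main(void)
{
	struct list_head bitmaps = LIST_HEAD_INIT(bitmaps);
	struct free_space *entry = NULL;	/* mirrors the fix's NULL initializer */

	/*
	 * Unguarded list_first_entry(&bitmaps, ...) would alias the list
	 * head and dereference garbage; check for an empty list first.
	 */
	if (!list_empty(&bitmaps))
		entry = list_first_entry(&bitmaps, struct free_space, list);

	if (!entry)
		printf("bitmap list empty, fall back to the offset tree\n");
	return 0;
}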

fs/btrfs/transaction.c

@@ -274,7 +274,6 @@ loop:
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
-	spin_lock_init(&cur_trans->deleted_bgs_lock);
 	spin_lock_init(&cur_trans->dropped_roots_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,

fs/btrfs/transaction.h

@@ -77,8 +77,8 @@ struct btrfs_transaction {
 	 */
 	struct mutex cache_write_mutex;
 	spinlock_t dirty_bgs_lock;
+	/* Protected by spin lock fs_info->unused_bgs_lock. */
 	struct list_head deleted_bgs;
-	spinlock_t deleted_bgs_lock;
 	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;

fs/btrfs/volumes.c

@@ -3548,12 +3548,11 @@ again:
 
 			ret = btrfs_force_chunk_alloc(trans, chunk_root,
						      BTRFS_BLOCK_GROUP_DATA);
+			btrfs_end_transaction(trans, chunk_root);
 			if (ret < 0) {
 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 				goto error;
 			}
-
-			btrfs_end_transaction(trans, chunk_root);
 			chunk_reserved = 1;
 		}
 
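
The balance fix is purely about ordering: the transaction handle must be ended however btrfs_force_chunk_alloc() returns, so the end call moves in front of the error check. The sketch below shows the same release-then-branch shape in plain C; start_transaction(), end_transaction() and force_chunk_alloc() are hypothetical stand-ins, not btrfs APIs:

#include <stdio.h>

static int open_handles;

static void start_transaction(void) { open_handles++; }
static void end_transaction(void)   { open_handles--; }

/* Stand-in for btrfs_force_chunk_alloc(); fails when asked to. */
static int force_chunk_alloc(int fail) { return fail ? -1 : 0; }

static int balance_step(int fail)
{
	int ret;

	start_transaction();
	ret = force_chunk_alloc(fail);
	end_transaction();	/* always runs, before the error branch */
	if (ret < 0)
		return ret;	/* no handle is leaked on this path */
	return 0;
}

int main(void)
{
	balance_step(1);	/* take the error path */
	printf("open handles after error path: %d\n", open_handles);	/* prints 0 */
	return 0;
}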