Merge branch 'for-4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:
 "We've collected a bunch of isolated fixes, for crashes, user-visible
  behaviour or missing bits from other subsystem cleanups from the past.
  The overall number is not small but I was not able to make it
  significantly smaller. Most of the patches are supposed to go to stable"

* 'for-4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: log csums for all modified extents
  Btrfs: fix unexpected result when dio reading corrupted blocks
  btrfs: Report error on removing qgroup if del_qgroup_item fails
  Btrfs: skip checksum when reading compressed data if some IO have failed
  Btrfs: fix kernel oops while reading compressed data
  Btrfs: use btrfs_op instead of bio_op in __btrfs_map_block
  Btrfs: do not backup tree roots when fsync
  btrfs: remove BTRFS_FS_QUOTA_DISABLING flag
  btrfs: propagate error to btrfs_cmp_data_prepare caller
  btrfs: prevent to set invalid default subvolid
  Btrfs: send: fix error number for unknown inode types
  btrfs: fix NULL pointer dereference from free_reloc_roots()
  btrfs: finish ordered extent cleaning if no progress is found
  btrfs: clear ordered flag on cleaning up ordered extents
  Btrfs: fix incorrect {node,sector}size endianness from BTRFS_IOC_FS_INFO
  Btrfs: do not reset bio->bi_ops while writing bio
  Btrfs: use the new helper wbc_to_write_flags
commit 5ba88cd6e9
fs/btrfs/compression.c

@@ -107,7 +107,8 @@ static void end_compressed_bio_read(struct bio *bio)
 	struct inode *inode;
 	struct page *page;
 	unsigned long index;
-	int ret;
+	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
+	int ret = 0;
 
 	if (bio->bi_status)
 		cb->errors = 1;
@@ -118,6 +119,21 @@ static void end_compressed_bio_read(struct bio *bio)
 	if (!refcount_dec_and_test(&cb->pending_bios))
 		goto out;
 
+	/*
+	 * Record the correct mirror_num in cb->orig_bio so that
+	 * read-repair can work properly.
+	 */
+	ASSERT(btrfs_io_bio(cb->orig_bio));
+	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
+	cb->mirror_num = mirror;
+
+	/*
+	 * Some IO in this cb have failed, just skip checksum as there
+	 * is no way it could be correct.
+	 */
+	if (cb->errors == 1)
+		goto csum_failed;
+
 	inode = cb->inode;
 	ret = check_compressed_csum(BTRFS_I(inode), cb,
 				    (u64)bio->bi_iter.bi_sector << 9);
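The checksum offset above is derived from the bio's sector number: block-layer sectors count 512-byte units, so shifting left by 9 converts to a byte offset, and the cast to u64 must happen before the shift so the result does not overflow on large devices. A standalone illustration (plain C, not kernel code):

/* Standalone illustration of the (u64)bi_sector << 9 conversion used above:
 * sector numbers are 512-byte units, so "<< 9" turns them into byte offsets.
 * The 64-bit cast has to come before the shift or the value would overflow
 * for offsets past 2 TiB on a 32-bit sector variable.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector = 10ULL * 1024 * 1024 * 1024;	/* sector 10Gi, ~5 TiB into the device */
	uint64_t byte_offset = sector << 9;		/* 512 bytes per sector */

	printf("sector %llu -> byte offset %llu\n",
	       (unsigned long long)sector,
	       (unsigned long long)byte_offset);
	return 0;
}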
fs/btrfs/ctree.h

@@ -709,7 +709,6 @@ struct btrfs_delayed_root;
 #define BTRFS_FS_OPEN				5
 #define BTRFS_FS_QUOTA_ENABLED			6
 #define BTRFS_FS_QUOTA_ENABLING			7
-#define BTRFS_FS_QUOTA_DISABLING		8
 #define BTRFS_FS_UPDATE_UUID_TREE_GEN		9
 #define BTRFS_FS_CREATING_FREE_SPACE_TREE	10
 #define BTRFS_FS_BTREE_ERR			11
fs/btrfs/disk-io.c

@@ -3643,7 +3643,14 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
 	u64 flags;
 
 	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
-	backup_super_roots(fs_info);
+
+	/*
+	 * max_mirrors == 0 indicates we're from commit_transaction,
+	 * not from fsync where the tree roots in fs_info have not
+	 * been consistent on disk.
+	 */
+	if (max_mirrors == 0)
+		backup_super_roots(fs_info);
 
 	sb = fs_info->super_for_commit;
 	dev_item = &sb->dev_item;
fs/btrfs/extent_io.c

@@ -3471,8 +3471,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned int write_flags = 0;
 	unsigned long nr_written = 0;
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = REQ_SYNC;
+	write_flags = wbc_to_write_flags(wbc);
 
 	trace___extent_writepage(page, inode, wbc);
 
@@ -3718,7 +3717,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	unsigned int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
+	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4063,9 +4062,6 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 	if (epd->bio) {
 		int ret;
 
-		bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-				 epd->sync_io ? REQ_SYNC : 0);
-
 		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 		BUG_ON(ret < 0); /* -ENOMEM */
 		epd->bio = NULL;
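These three hunks stop open-coding "WB_SYNC_ALL means REQ_SYNC" at each submission site and derive the write flags once from the writeback_control via the common wbc_to_write_flags() helper. The following is only a rough userspace model of that idea, with simplified flag values and a simplified struct, not the kernel definitions:

/* Rough userspace model of the idea behind wbc_to_write_flags(): derive the
 * block-layer request flags from the writeback sync mode in one place.
 * REQ_SYNC and struct writeback_control below are stand-ins for the example.
 */
#include <stdio.h>

#define REQ_SYNC	(1u << 0)	/* placeholder bit, not the real value */

enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

struct writeback_control {
	enum writeback_sync_modes sync_mode;
};

static unsigned int wbc_to_write_flags(const struct writeback_control *wbc)
{
	/* Integrity writeback (fsync, sync) must be marked synchronous. */
	return wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0;
}

int main(void)
{
	struct writeback_control background = { .sync_mode = WB_SYNC_NONE };
	struct writeback_control fsync_wb = { .sync_mode = WB_SYNC_ALL };

	printf("background flags: %#x\n", wbc_to_write_flags(&background));
	printf("fsync flags:      %#x\n", wbc_to_write_flags(&fsync_wb));
	return 0;
}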
fs/btrfs/inode.c

@@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
 						 const u64 offset,
 						 const u64 bytes)
 {
+	unsigned long index = offset >> PAGE_SHIFT;
+	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(inode->i_mapping, index);
+		index++;
+		if (!page)
+			continue;
+		ClearPagePrivate2(page);
+		put_page(page);
+	}
 	return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
 					    bytes - PAGE_SIZE, false);
 }
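The added loop visits every page that overlaps the failed range; the index arithmetic turns a byte range into an inclusive page-index range using PAGE_SHIFT. A standalone C illustration of that conversion, assuming 4 KiB pages purely for the example:

/* Standalone illustration of the byte-range -> page-index conversion used by
 * the loop above. PAGE_SHIFT is assumed to be 12 (4 KiB pages) here; the
 * kernel uses the architecture's real value.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t offset = 6000;		/* byte range [6000, 6000 + 10000) */
	uint64_t bytes = 10000;

	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;

	/* Inclusive range of page indexes that overlap the byte range. */
	printf("pages %lu..%lu cover bytes %llu..%llu\n",
	       index, end_index,
	       (unsigned long long)offset,
	       (unsigned long long)(offset + bytes - 1));
	return 0;
}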
fs/btrfs/inode.c

@@ -8357,11 +8369,8 @@ static void btrfs_endio_direct_read(struct bio *bio)
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	blk_status_t err = bio->bi_status;
 
-	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) {
+	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
 		err = btrfs_subio_endio_read(inode, io_bio, err);
-		if (!err)
-			bio->bi_status = 0;
-	}
 
 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
 		      dip->logical_offset + dip->bytes - 1);
@@ -8369,7 +8378,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
 	kfree(dip);
 
-	dio_bio->bi_status = bio->bi_status;
+	dio_bio->bi_status = err;
 	dio_end_io(dio_bio);
 
 	if (io_bio->end_io)
@@ -8387,6 +8396,7 @@ static void __endio_write_update_ordered(struct inode *inode,
 	btrfs_work_func_t func;
 	u64 ordered_offset = offset;
 	u64 ordered_bytes = bytes;
+	u64 last_offset;
 	int ret;
 
 	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
@@ -8398,6 +8408,7 @@ static void __endio_write_update_ordered(struct inode *inode,
 	}
 
 again:
+	last_offset = ordered_offset;
 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
 						   &ordered_offset,
 						   ordered_bytes,
@@ -8408,6 +8419,12 @@ again:
 	btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
 	btrfs_queue_work(wq, &ordered->work);
 out_test:
+	/*
+	 * If btrfs_dec_test_ordered_pending does not find any ordered extent
+	 * in the range, we can exit.
+	 */
+	if (ordered_offset == last_offset)
+		return;
 	/*
 	 * our bio might span multiple ordered extents. If we haven't
 	 * completed the accounting for the whole dio, go back and try again
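The last three hunks make __endio_write_update_ordered() remember where each pass started and bail out when a pass makes no progress, instead of looping again over a range in which no ordered extent was found. A generic, hedged sketch of that loop shape (plain C, not btrfs code; process_chunk() is a made-up stand-in for the real accounting step):

/* Generic sketch of the "retry only while progress is made" pattern added
 * above: each pass records its starting offset and the loop stops either when
 * the whole range is accounted for or when a pass did not advance.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend to account for a prefix of [offset, offset + len); return how many
 * bytes were handled, 0 meaning nothing was found in the range. */
static uint64_t process_chunk(uint64_t offset, uint64_t len)
{
	static int calls;

	(void)offset;
	if (++calls > 2)
		return 0;			/* simulate "no ordered extent found" */
	return len > 4096 ? 4096 : len;		/* handle at most one 4 KiB chunk */
}

static void update_ordered(uint64_t offset, uint64_t bytes)
{
	uint64_t end = offset + bytes;

	while (offset < end) {
		uint64_t last_offset = offset;	/* remember the starting point */
		uint64_t done = process_chunk(offset, end - offset);

		offset += done;
		if (offset == last_offset)	/* no progress: stop instead of spinning */
			break;
		printf("accounted up to %llu\n", (unsigned long long)offset);
	}
}

int main(void)
{
	update_ordered(0, 3 * 4096 + 123);
	return 0;
}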
fs/btrfs/ioctl.c

@@ -2773,9 +2773,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
-	fi_args->nodesize = fs_info->super_copy->nodesize;
-	fi_args->sectorsize = fs_info->super_copy->sectorsize;
-	fi_args->clone_alignment = fs_info->super_copy->sectorsize;
+	fi_args->nodesize = fs_info->nodesize;
+	fi_args->sectorsize = fs_info->sectorsize;
+	fi_args->clone_alignment = fs_info->sectorsize;
 
 	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
 		ret = -EFAULT;
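The super_copy fields are kept in on-disk (little-endian) byte order, so copying them out directly returned byte-swapped node and sector sizes on big-endian hosts; the cached fs_info->nodesize/sectorsize values are native-endian. A minimal userspace sketch of querying these values through BTRFS_IOC_FS_INFO, the ioctl this hunk fixes (the mount point /mnt/btrfs is an assumption; run it against any mounted btrfs filesystem):

/* Minimal sketch: read nodesize/sectorsize via BTRFS_IOC_FS_INFO.
 * Error handling is kept short; /mnt/btrfs is just an example path.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_fs_info_args args;
	int fd = open("/mnt/btrfs", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	if (ioctl(fd, BTRFS_IOC_FS_INFO, &args) < 0) {
		perror("BTRFS_IOC_FS_INFO");
		close(fd);
		return 1;
	}

	printf("devices: %llu  nodesize: %u  sectorsize: %u  clone_alignment: %u\n",
	       (unsigned long long)args.num_devices,
	       args.nodesize, args.sectorsize, args.clone_alignment);
	close(fd);
	return 0;
}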
fs/btrfs/ioctl.c

@@ -3032,7 +3032,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
 out:
 	if (ret)
 		btrfs_cmp_data_free(cmp);
-	return 0;
+	return ret;
 }
 
 static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
@@ -4061,6 +4061,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 		ret = PTR_ERR(new_root);
 		goto out;
 	}
+	if (!is_fstree(new_root->objectid)) {
+		ret = -ENOENT;
+		goto out;
+	}
 
 	path = btrfs_alloc_path();
 	if (!path) {
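The added is_fstree() check rejects object ids that do not name a subvolume, so the default subvolume can no longer be pointed at an internal tree such as the extent or chunk tree. A simplified, hedged model of that validity check follows; the constants are the real btrfs ids (top-level subvolume is 5, user subvolumes start at 256), but the kernel helper also masks off the qgroup level, which is omitted here:

/* Simplified model of an is_fstree()-style check: only the top-level
 * subvolume (object id 5) and normal subvolume ids (>= 256) are acceptable as
 * a default subvolume; internal trees (extent tree = 2, chunk tree = 3, ...)
 * are rejected. This mirrors the intent of the hunk above, not the exact
 * kernel helper.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BTRFS_FS_TREE_OBJECTID		5ULL	/* top-level subvolume */
#define BTRFS_FIRST_FREE_OBJECTID	256ULL	/* first user subvolume id */

static bool looks_like_fs_tree(uint64_t objectid)
{
	return objectid == BTRFS_FS_TREE_OBJECTID ||
	       objectid >= BTRFS_FIRST_FREE_OBJECTID;
}

int main(void)
{
	uint64_t ids[] = { 2, 5, 256, 12345 };

	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("objectid %llu: %s\n", (unsigned long long)ids[i],
		       looks_like_fs_tree(ids[i]) ? "subvolume" : "rejected");
	return 0;
}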
fs/btrfs/qgroup.c

@@ -807,7 +807,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 	}
 	ret = 0;
 out:
-	set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -953,7 +952,6 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 	if (!fs_info->quota_root)
 		goto out;
 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-	set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
 	btrfs_qgroup_wait_for_completion(fs_info, false);
 	spin_lock(&fs_info->qgroup_lock);
 	quota_root = fs_info->quota_root;
@@ -1307,6 +1305,8 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
 		}
 	}
 	ret = del_qgroup_item(trans, quota_root, qgroupid);
+	if (ret && ret != -ENOENT)
+		goto out;
 
 	while (!list_empty(&qgroup->groups)) {
 		list = list_first_entry(&qgroup->groups,
@@ -2086,8 +2086,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 
 	if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
 		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-	if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
-		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
 
 	spin_lock(&fs_info->qgroup_lock);
 	while (!list_empty(&fs_info->dirty_qgroups)) {
fs/btrfs/relocation.c

@@ -2400,11 +2400,11 @@ void free_reloc_roots(struct list_head *list)
 	while (!list_empty(list)) {
 		reloc_root = list_entry(list->next, struct btrfs_root,
 					root_list);
+		__del_reloc_root(reloc_root);
 		free_extent_buffer(reloc_root->node);
 		free_extent_buffer(reloc_root->commit_root);
 		reloc_root->node = NULL;
 		reloc_root->commit_root = NULL;
-		__del_reloc_root(reloc_root);
 	}
 }
 
fs/btrfs/send.c

@@ -2630,7 +2630,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
 	} else {
 		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
 			   (int)(mode & S_IFMT));
-		ret = -ENOTSUPP;
+		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
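ENOTSUPP (524) is a kernel-internal error code with no userspace definition, so returning it from send surfaces as "Unknown error 524"; EOPNOTSUPP (95) is the proper userspace-visible errno. A small demonstration of the difference as seen from userspace (the ENOTSUPP value is hard-coded because it is deliberately absent from <errno.h>):

/* Shows why returning ENOTSUPP to userspace is wrong: strerror() has no text
 * for it, while EOPNOTSUPP maps to a real message. 524 is the kernel-internal
 * value of ENOTSUPP.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_ENOTSUPP 524	/* kernel-internal, not in <errno.h> */

int main(void)
{
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
	printf("ENOTSUPP   (%d): %s\n", KERNEL_ENOTSUPP, strerror(KERNEL_ENOTSUPP));
	return 0;
}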
fs/btrfs/tree-log.c

@@ -4181,6 +4181,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 	struct extent_map *em, *n;
 	struct list_head extents;
 	struct extent_map_tree *tree = &inode->extent_tree;
+	u64 logged_start, logged_end;
 	u64 test_gen;
 	int ret = 0;
 	int num = 0;
@@ -4190,10 +4191,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 	down_write(&inode->dio_sem);
 	write_lock(&tree->lock);
 	test_gen = root->fs_info->last_trans_committed;
+	logged_start = start;
+	logged_end = end;
 
 	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
 		list_del_init(&em->list);
-
 		/*
 		 * Just an arbitrary number, this can be really CPU intensive
 		 * once we start getting a lot of extents, and really once we
@@ -4208,6 +4210,12 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 
 		if (em->generation <= test_gen)
 			continue;
+
+		if (em->start < logged_start)
+			logged_start = em->start;
+		if ((em->start + em->len - 1) > logged_end)
+			logged_end = em->start + em->len - 1;
+
 		/* Need a ref to keep it from getting evicted from cache */
 		refcount_inc(&em->refs);
 		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -4216,7 +4224,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
 	}
 
 	list_sort(NULL, &extents, extent_cmp);
-	btrfs_get_logged_extents(inode, logged_list, start, end);
+	btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
 	/*
 	 * Some ordered extents started by fsync might have completed
 	 * before we could collect them into the list logged_list, which
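The logged_start/logged_end pair begins as the fsync range and is widened to cover every modified extent found in the loop, so checksums for extents that extend outside [start, end] are still logged. A standalone C sketch of that min/max range expansion (the extent list is made-up example data):

/* Standalone sketch of the range-widening done above: begin with the fsync
 * range and grow it so every modified extent is fully covered.
 */
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start, len; };

int main(void)
{
	const struct extent modified[] = {
		{ 4096, 8192 },		/* starts before the fsync range */
		{ 65536, 4096 },
		{ 126976, 16384 },	/* extends past the fsync range */
	};
	uint64_t start = 8192, end = 131071;	/* fsync range, inclusive end */
	uint64_t logged_start = start, logged_end = end;

	for (unsigned i = 0; i < sizeof(modified) / sizeof(modified[0]); i++) {
		uint64_t ext_end = modified[i].start + modified[i].len - 1;

		if (modified[i].start < logged_start)
			logged_start = modified[i].start;
		if (ext_end > logged_end)
			logged_end = ext_end;
	}

	printf("fsync range  [%llu, %llu]\n", (unsigned long long)start,
	       (unsigned long long)end);
	printf("logged range [%llu, %llu]\n", (unsigned long long)logged_start,
	       (unsigned long long)logged_end);
	return 0;
}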
fs/btrfs/volumes.c

@@ -6166,7 +6166,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 	map_length = length;
 
 	btrfs_bio_counter_inc_blocked(fs_info);
-	ret = __btrfs_map_block(fs_info, bio_op(bio), logical,
+	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
 				&map_length, &bbio, mirror_num, 1);
 	if (ret) {
 		btrfs_bio_counter_dec(fs_info);
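__btrfs_map_block() expects a btrfs mapping operation, not a raw block-layer op, so passing bio_op(bio) only happened to work for plain reads and writes; btrfs_op() translates the bio's operation into the value the mapper expects. A hedged sketch of that kind of translation follows; the enum names and values are simplified stand-ins, not the kernel's REQ_OP_* or BTRFS_MAP_* definitions:

/* Simplified model of translating a block-layer operation into the mapper's
 * own enum, which is what switching from bio_op(bio) to btrfs_op(bio) fixes:
 * the two enums are not interchangeable, so convert explicitly.
 */
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };
enum map_op { MAP_READ, MAP_WRITE, MAP_DISCARD };

static enum map_op to_map_op(enum req_op op)
{
	switch (op) {
	case REQ_OP_WRITE:
		return MAP_WRITE;
	case REQ_OP_DISCARD:
		return MAP_DISCARD;
	default:
		return MAP_READ;	/* reads and anything else map to READ */
	}
}

int main(void)
{
	printf("write -> %d, discard -> %d, read -> %d\n",
	       to_map_op(REQ_OP_WRITE), to_map_op(REQ_OP_DISCARD),
	       to_map_op(REQ_OP_READ));
	return 0;
}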