ext4: use common helpers in all places reading metadata buffers
Remove all the open-coded reads of metadata buffers and switch them to the
ext4_read_bh_*() common helpers.

Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
Suggested-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20200924073337.861472-4-yi.zhang@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent fa491b14cd
commit 2d069c0889
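For context, the ext4_read_bh_*() helpers that the hunks below switch to were added earlier in this series. The following is only a rough sketch of their shape, inferred from how the converted call sites use them; the signatures match the calls in this patch, but the bodies are simplified illustrative assumptions, not the real upstream implementation (which, among other things, also skips the I/O and unlocks the buffer when it is already uptodate):

#include <linux/buffer_head.h>  /* bh_end_io_t, submit_bh(), end_buffer_read_sync() */
#include "ext4.h"               /* clear_buffer_verified() */

/*
 * Sketch only: what a caller of these helpers can assume, based on the
 * converted call sites below.  All three expect the buffer to be locked
 * (or lock it themselves, in the _lock variant).
 */

/* Submit a read for an already-locked buffer and return without waiting. */
void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
                         bh_end_io_t *end_io)
{
        clear_buffer_verified(bh);
        bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
        get_bh(bh);
        submit_bh(REQ_OP_READ, op_flags, bh);
}

/* Same, but wait for completion; returns 0 on success or -EIO. */
int ext4_read_bh(struct buffer_head *bh, int op_flags, bh_end_io_t *end_io)
{
        ext4_read_bh_nowait(bh, op_flags, end_io);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}

/* Lock the buffer (or piggyback on an in-flight read) and optionally wait. */
int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
{
        if (trylock_buffer(bh)) {
                if (wait)
                        return ext4_read_bh(bh, op_flags, NULL);
                ext4_read_bh_nowait(bh, op_flags, NULL);
                return 0;
        }
        if (!wait)
                return 0;
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}

The conversions below pick the variant that matches the old open-coded pattern: ext4_read_bh_nowait() where the caller used get_bh()/submit_bh() and handled completion itself, ext4_read_bh() where it previously did submit_bh() plus wait_on_buffer() on a buffer it had already locked (e.g. via bh_uptodate_or_lock() or lock_buffer()), and ext4_read_bh_lock() where it used ll_rw_block(), which locks the buffer internally.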
fs/ext4/balloc.c
@@ -494,12 +494,10 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
          * submit the buffer_head for reading
          */
         set_buffer_new(bh);
-        clear_buffer_verified(bh);
         trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
-        bh->b_end_io = ext4_end_bitmap_read;
-        get_bh(bh);
-        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO |
-                  (ignore_locked ? REQ_RAHEAD : 0), bh);
+        ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO |
+                            (ignore_locked ? REQ_RAHEAD : 0),
+                            ext4_end_bitmap_read);
         return bh;
 verify:
         err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
fs/ext4/extents.c
@@ -501,8 +501,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
 
         if (!bh_uptodate_or_lock(bh)) {
                 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
-                clear_buffer_verified(bh);
-                err = bh_submit_read(bh);
+                err = ext4_read_bh(bh, 0, NULL);
                 if (err < 0)
                         goto errout;
         }
fs/ext4/ialloc.c
@@ -188,12 +188,8 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
         /*
          * submit the buffer_head for reading
          */
-        clear_buffer_verified(bh);
         trace_ext4_load_inode_bitmap(sb, block_group);
-        bh->b_end_io = ext4_end_bitmap_read;
-        get_bh(bh);
-        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
-        wait_on_buffer(bh);
+        ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
         ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
         if (!buffer_uptodate(bh)) {
                 put_bh(bh);
fs/ext4/indirect.c
@@ -163,7 +163,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                 }
 
                 if (!bh_uptodate_or_lock(bh)) {
-                        if (bh_submit_read(bh) < 0) {
+                        if (ext4_read_bh(bh, 0, NULL) < 0) {
                                 put_bh(bh);
                                 goto failure;
                         }
fs/ext4/inode.c
@@ -878,19 +878,20 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int map_flags)
 {
         struct buffer_head *bh;
+        int ret;
 
         bh = ext4_getblk(handle, inode, block, map_flags);
         if (IS_ERR(bh))
                 return bh;
         if (!bh || ext4_buffer_uptodate(bh))
                 return bh;
-        clear_buffer_verified(bh);
-        ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
-        wait_on_buffer(bh);
-        if (buffer_uptodate(bh))
-                return bh;
-        put_bh(bh);
-        return ERR_PTR(-EIO);
+
+        ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
+        if (ret) {
+                put_bh(bh);
+                return ERR_PTR(ret);
+        }
+        return bh;
 }
 
 /* Read a contiguous batch of blocks. */
@@ -910,11 +911,8 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
 
         for (i = 0; i < bh_count; i++)
                 /* Note that NULL bhs[i] is valid because of holes. */
-                if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) {
-                        clear_buffer_verified(bhs[i]);
-                        ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
-                                    &bhs[i]);
-                }
+                if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
+                        ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);
 
         if (!wait)
                 return 0;
@@ -1084,7 +1082,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                     !buffer_unwritten(bh) &&
                     (block_start < from || block_end > to)) {
-                        ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+                        ext4_read_bh_lock(bh, 0, false);
                         wait[nr_wait++] = bh;
                 }
         }
@@ -3756,11 +3754,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                         set_buffer_uptodate(bh);
 
         if (!buffer_uptodate(bh)) {
-                err = -EIO;
-                ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-                wait_on_buffer(bh);
-                /* Uhhuh. Read error. Complain and punt. */
-                if (!buffer_uptodate(bh))
+                err = ext4_read_bh_lock(bh, 0, true);
+                if (err)
                         goto unlock;
                 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                         /* We expect the key to be set. */
@@ -4404,9 +4399,7 @@ make_io:
                  * Read the block from disk.
                  */
                 trace_ext4_load_inode(inode);
-                get_bh(bh);
-                bh->b_end_io = end_buffer_read_sync;
-                submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
+                ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
                 blk_finish_plug(&plug);
                 wait_on_buffer(bh);
                 if (!buffer_uptodate(bh)) {
fs/ext4/mmp.c
@@ -85,15 +85,11 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
                 }
         }
 
-        get_bh(*bh);
         lock_buffer(*bh);
-        (*bh)->b_end_io = end_buffer_read_sync;
-        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
-        wait_on_buffer(*bh);
-        if (!buffer_uptodate(*bh)) {
-                ret = -EIO;
+        ret = ext4_read_bh(*bh, REQ_META | REQ_PRIO, NULL);
+        if (ret)
                 goto warn_exit;
-        }
+
         mmp = (struct mmp_struct *)((*bh)->b_data);
         if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
                 ret = -EFSCORRUPTED;
fs/ext4/move_extent.c
@@ -215,7 +215,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
         for (i = 0; i < nr; i++) {
                 bh = arr[i];
                 if (!bh_uptodate_or_lock(bh)) {
-                        err = bh_submit_read(bh);
+                        err = ext4_read_bh(bh, 0, NULL);
                         if (err)
                                 return err;
                 }
fs/ext4/resize.c
@@ -1245,7 +1245,7 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
         if (unlikely(!bh))
                 return NULL;
         if (!bh_uptodate_or_lock(bh)) {
-                if (bh_submit_read(bh) < 0) {
+                if (ext4_read_bh(bh, 0, NULL) < 0) {
                         brelse(bh);
                         return NULL;
                 }
fs/ext4/super.c
@@ -212,19 +212,21 @@ int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait)
 struct buffer_head *
 ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
 {
-        struct buffer_head *bh = sb_getblk(sb, block);
+        struct buffer_head *bh;
+        int ret;
 
+        bh = sb_getblk(sb, block);
         if (bh == NULL)
                 return ERR_PTR(-ENOMEM);
         if (ext4_buffer_uptodate(bh))
                 return bh;
-        clear_buffer_verified(bh);
-        ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
-        wait_on_buffer(bh);
-        if (buffer_uptodate(bh))
-                return bh;
-        put_bh(bh);
-        return ERR_PTR(-EIO);
+
+        ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
+        if (ret) {
+                put_bh(bh);
+                return ERR_PTR(ret);
+        }
+        return bh;
 }
 
 static int ext4_verify_csum_type(struct super_block *sb,
@@ -5176,9 +5178,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
                 goto out_bdev;
         }
         journal->j_private = sb;
-        ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
-        wait_on_buffer(journal->j_sb_buffer);
-        if (!buffer_uptodate(journal->j_sb_buffer)) {
+        if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
                 ext4_msg(sb, KERN_ERR, "I/O error on journal device");
                 goto out_journal;
         }