fs: have ll_rw_block users pass in op and flags separately
This has ll_rw_block users pass in the operation and flags separately, so ll_rw_block can set up the bio op and bi_rw flags on the bio that is submitted.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit dfec8a14fc
parent 2a222ca992
fs/buffer.c | 19
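The conversion is mechanical: the old combined rw argument splits into a REQ_OP_* operation and a separate op_flags word. A minimal before/after sketch (all call shapes are taken from the hunks below; surrounding declarations elided):

	/* Old: one rw argument mixing the operation with rq_flag_bits. */
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);

	/* New: REQ_OP_* names the operation, flags travel separately.
	 * Plain reads pass 0; readahead becomes REQ_OP_READ + READA. */
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
	ll_rw_block(REQ_OP_READ, READA, 1, &bh);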
fs/buffer.c:

@@ -588,7 +588,7 @@ void write_boundary_block(struct block_device *bdev,
 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 	if (bh) {
 		if (buffer_dirty(bh))
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
 		put_bh(bh);
 	}
 }
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(READA, 1, &bh);
+		ll_rw_block(REQ_OP_READ, READA, 1, &bh);
 		brelse(bh);
 	}
 }
@@ -1955,7 +1955,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++=bh;
 		}
 	}
@@ -2852,7 +2852,7 @@ int block_truncate_page(struct address_space *mapping,

 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
@@ -3051,7 +3051,8 @@ EXPORT_SYMBOL(submit_bh);

 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits or %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
@@ -3074,7 +3075,7 @@ EXPORT_SYMBOL(submit_bh);
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
 {
 	int i;

@@ -3083,18 +3084,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])

 		if (!trylock_buffer(bh))
 			continue;
-		if (rw == WRITE) {
+		if (op == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(rw, 0, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		} else {
 			if (!buffer_uptodate(bh)) {
 				bh->b_end_io = end_buffer_read_sync;
 				get_bh(bh);
-				submit_bh(rw, 0, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		}
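Note that ll_rw_block() does no bio setup itself; it passes op and op_flags straight through to submit_bh(), which the parent commit (2a222ca992) converted to take the pair and record it on the bio. Roughly, and assuming the bio_set_op_attrs() helper from that series, the hand-off inside submit_bh() looks like this sketch (not verbatim kernel code):

	/* Build a bio for the buffer_head, then stash the operation and
	 * the flags together in bio->bi_rw before submitting. */
	bio_set_op_attrs(bio, op, op_flags);	/* bio->bi_rw |= op | op_flags */
	submit_bio(bio);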
@@ -981,7 +981,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 		return bh;
 	if (!bh || buffer_uptodate(bh))
 		return bh;
-	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
@@ -1135,7 +1135,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
 				S_ISREG(inode->i_mode);
@@ -3698,7 +3698,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,

 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
@@ -1443,7 +1443,8 @@ restart:
 			}
 			bh_use[ra_max] = bh;
 			if (bh)
-				ll_rw_block(READ | REQ_META | REQ_PRIO,
+				ll_rw_block(REQ_OP_READ,
+					    REQ_META | REQ_PRIO,
 					    1, &bh);
 		}
 	}
@@ -4204,7 +4204,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
 		goto out_bdev;
 	}
 	journal->j_private = sb;
-	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
+	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
 	wait_on_buffer(journal->j_sb_buffer);
 	if (!buffer_uptodate(journal->j_sb_buffer)) {
 		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
@@ -974,7 +974,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)

 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
@@ -449,7 +449,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);

 	dblock++;
 	extlen--;
@@ -458,7 +458,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 		bh = gfs2_getbuf(gl, dblock, CREATE);

 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(READA | REQ_META, 1, &bh);
+			ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;
@@ -730,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 		if (PageUptodate(page))
 			set_buffer_uptodate(bh);
 		if (!buffer_uptodate(bh)) {
-			ll_rw_block(READ | REQ_META, 1, &bh);
+			ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
 			wait_on_buffer(bh);
 			if (!buffer_uptodate(bh))
 				goto unlock_out;
@@ -81,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
 	blocknum = block_start >> bufshift;
 	memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
 	haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-	ll_rw_block(READ, haveblocks, bhs);
+	ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);

 	curbh = 0;
 	curpage = 0;
@@ -1498,7 +1498,7 @@ static int journal_get_superblock(journal_t *journal)

 	J_ASSERT(bh != NULL);
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			printk(KERN_ERR
@@ -104,7 +104,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
 		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
 			bufs[nbufs++] = bh;
 			if (nbufs == MAXBUF) {
-				ll_rw_block(READ, nbufs, bufs);
+				ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
 				journal_brelse_array(bufs, nbufs);
 				nbufs = 0;
 			}
@@ -113,7 +113,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
 	}

 	if (nbufs)
-		ll_rw_block(READ, nbufs, bufs);
+		ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
 	err = 0;

 failed:
@@ -640,7 +640,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 			   !buffer_new(bh) &&
 			   ocfs2_should_read_blk(inode, page, block_start) &&
 			   (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++=bh;
 		}

@@ -1819,7 +1819,7 @@ static int ocfs2_get_sector(struct super_block *sb,
 	if (!buffer_dirty(*bh))
 		clear_buffer_uptodate(*bh);
 	unlock_buffer(*bh);
-	ll_rw_block(READ, 1, bh);
+	ll_rw_block(REQ_OP_READ, 0, 1, bh);
 	wait_on_buffer(*bh);
 	if (!buffer_uptodate(*bh)) {
 		mlog_errno(-EIO);
@@ -870,7 +870,7 @@ loop_next:
 		 */
 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 			spin_unlock(lock);
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
 			spin_lock(lock);
 		}
 		put_bh(bh);
@@ -1057,7 +1057,7 @@ static int flush_commit_list(struct super_block *s,
 		if (tbh) {
 			if (buffer_dirty(tbh)) {
 				depth = reiserfs_write_unlock_nested(s);
-				ll_rw_block(WRITE, 1, &tbh);
+				ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
 				reiserfs_write_lock_nested(s, depth);
 			}
 			put_bh(tbh) ;
@@ -2244,7 +2244,7 @@ abort_replay:
 		}
 	}
 	/* read in the log blocks, memcpy to the corresponding real block */
-	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
+	ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
 	for (i = 0; i < get_desc_trans_len(desc); i++) {

 		wait_on_buffer(log_blocks[i]);
@@ -2346,7 +2346,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
 		} else
 			bhlist[j++] = bh;
 	}
-	ll_rw_block(READ, j, bhlist);
+	ll_rw_block(REQ_OP_READ, 0, j, bhlist);
 	for (i = 1; i < j; i++)
 		brelse(bhlist[i]);
 	bh = bhlist[0];
@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(READA, 1, bh + j);
+			ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
 		}
 		brelse(bh[j]);
 	}
@@ -660,7 +660,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
 		if (!buffer_uptodate(bh) && depth == -1)
 			depth = reiserfs_write_unlock_nested(sb);

-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);

 		if (depth != -1)
@@ -1661,7 +1661,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-	ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
+	ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
 	wait_on_buffer(SB_BUFFER_WITH_SB(s));
 	if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
 		reiserfs_warning(s, "reiserfs-2504", "error reading the super");
@@ -124,7 +124,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 				goto block_release;
 			bytes += msblk->devblksize;
 		}
-		ll_rw_block(READ, b, bh);
+		ll_rw_block(REQ_OP_READ, 0, b, bh);
 	} else {
 		/*
 		 * Metadata block.
@@ -156,7 +156,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 				goto block_release;
 			bytes += msblk->devblksize;
 		}
-		ll_rw_block(READ, b - 1, bh + 1);
+		ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
 	}

 	for (i = 0; i < b; i++) {
@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 				brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(READA, num, bha);
+				ll_rw_block(REQ_OP_READ, READA, num, bha);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 				brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(READA, num, bha);
+				ll_rw_block(REQ_OP_READ, READA, num, bha);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
@@ -1199,7 +1199,7 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
 	if (buffer_uptodate(bh))
 		return bh;

-	ll_rw_block(READ, 1, &bh);
+	ll_rw_block(REQ_OP_READ, 0, 1, &bh);

 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
@@ -292,7 +292,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 			if (!buffer_mapped(bh))
 					map_bh(bh, inode->i_sb, oldb + pos);
 			if (!buffer_uptodate(bh)) {
-				ll_rw_block(READ, 1, &bh);
+				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 				wait_on_buffer(bh);
 				if (!buffer_uptodate(bh)) {
 					ufs_error(inode->i_sb, __func__,
@@ -187,7 +187,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, struct buffer_head * bh[]);
+void ll_rw_block(int, int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
 void write_dirty_buffer(struct buffer_head *bh, int op_flags);
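With the prototype widened to (op, op_flags, nr, bhs), the op value also drives ll_rw_block()'s internal dispatch (see the fs/buffer.c hunk above): WRITE submits only buffers that are dirty, while any read leaves already-uptodate buffers alone. A usage sketch under those semantics; bh_a and bh_b are hypothetical buffer_heads, and the op == WRITE test in the hunk above still matches because REQ_OP_WRITE shares the old WRITE value at this point in the series:

	struct buffer_head *bhs[2] = { bh_a, bh_b };	/* hypothetical */

	/* Write back whichever of the two buffers is dirty. */
	ll_rw_block(REQ_OP_WRITE, 0, 2, bhs);

	/* Kick off readahead for whichever is not yet up to date. */
	ll_rw_block(REQ_OP_READ, READA, 2, bhs);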