udf: replace ll_rw_block()
ll_rw_block() is not safe for the sync read path because it cannot guarantee that the read IO is actually submitted if the buffer is already locked; we could then get a false-positive EIO after wait_on_buffer() when the buffer was merely locked by others. So stop using ll_rw_block(). We also switch to the new bh_readahead_batch() helper for the buffer array readahead path.

Link: https://lkml.kernel.org/r/20220901133505.2510834-11-yi.zhang@huawei.com
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 59a16786fa
parent d554822e82
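Not part of the patch: a minimal sketch of the sync-read pattern being removed, assuming a kernel that still provides ll_rw_block(); the helper name read_block_sync_unsafe() is hypothetical.

/*
 * ll_rw_block() only submits I/O for buffers it can trylock.  If
 * another task holds the buffer lock, the buffer is skipped silently;
 * wait_on_buffer() then returns once that task drops the lock, and the
 * !buffer_uptodate() check can report -EIO even though no read was
 * ever submitted or failed.
 */
#include <linux/buffer_head.h>

static int read_block_sync_unsafe(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;

	ll_rw_block(REQ_OP_READ, 1, &bh);	/* may skip a locked buffer */
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		return -EIO;			/* possibly a false positive */
	return 0;
}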
fs/udf/dir.c
@@ -130,7 +130,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
 					brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
+				bh_readahead_batch(num, bha, REQ_RAHEAD);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
fs/udf/directory.c
@@ -89,7 +89,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 					brelse(tmp);
 			}
 			if (num) {
-				ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
+				bh_readahead_batch(num, bha, REQ_RAHEAD);
 				for (i = 0; i < num; i++)
 					brelse(bha[i]);
 			}
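The two hunks above switch the buffer-array readahead from ll_rw_block() to bh_readahead_batch(). A minimal sketch of the new path follows; the surrounding helper and the way bha[] gets filled are illustrative assumptions. bh_readahead_batch() trylocks each buffer, skips the ones that are already up to date or locked, and submits asynchronous REQ_RAHEAD reads for the rest, so a buffer locked by someone else is simply left alone.

#include <linux/buffer_head.h>

/* Hypothetical helper: submit readahead for up to num collected buffers. */
static void udf_readahead_sketch(struct buffer_head *bha[], int num)
{
	int i;

	if (num) {
		bh_readahead_batch(num, bha, REQ_RAHEAD);
		for (i = 0; i < num; i++)
			brelse(bha[i]);	/* drop our reference; the async reads finish on their own */
	}
}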
fs/udf/inode.c
@@ -1211,13 +1211,7 @@ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
 	if (!bh)
 		return NULL;
 
-	if (buffer_uptodate(bh))
-		return bh;
-
-	ll_rw_block(REQ_OP_READ, 1, &bh);
-
-	wait_on_buffer(bh);
-	if (buffer_uptodate(bh))
+	if (bh_read(bh, 0) >= 0)
 		return bh;
 
 	brelse(bh);
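The udf_bread() hunk above collapses the open-coded lock/submit/wait sequence into bh_read(). A sketch of the resulting read path, assuming bh_read() behaves as documented in <linux/buffer_head.h>: it returns 1 if the buffer was already up to date, 0 after a successful synchronous read, and a negative errno on failure, so any non-negative value means the contents are valid, which is what the ">= 0" test relies on. The wrapper function is hypothetical.

#include <linux/buffer_head.h>

/* Hypothetical wrapper around the new udf_bread() read path. */
static struct buffer_head *read_block_sketch(struct buffer_head *bh)
{
	if (!bh)
		return NULL;

	if (bh_read(bh, 0) >= 0)	/* takes the lock, reads if needed, waits */
		return bh;

	brelse(bh);			/* a real read error: drop the buffer */
	return NULL;
}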