fs/buffer: Use the new blk_opf_t type
Improve static type checking by using the new blk_opf_t type for block
layer request flags. Change WRITE into REQ_OP_WRITE. This patch does not
change any functionality since REQ_OP_WRITE == WRITE == 1.

Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-47-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
f8e6e4bd9f
commit
3ae7286943
21
fs/buffer.c
21
fs/buffer.c
|
@@ -52,8 +52,8 @@
|
|||
#include "internal.h"
|
||||
|
||||
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
|
||||
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
|
||||
struct writeback_control *wbc);
|
||||
static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
|
||||
struct buffer_head *bh, struct writeback_control *wbc);
|
||||
|
||||
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
|
||||
|
||||
|
@@ -1716,7 +1716,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
|
|||
struct buffer_head *bh, *head;
|
||||
unsigned int blocksize, bbits;
|
||||
int nr_underway = 0;
|
||||
int write_flags = wbc_to_write_flags(wbc);
|
||||
blk_opf_t write_flags = wbc_to_write_flags(wbc);
|
||||
|
||||
head = create_page_buffers(page, inode,
|
||||
(1 << BH_Dirty)|(1 << BH_Uptodate));
|
||||
|
@@ -2994,8 +2994,8 @@ static void end_bio_bh_io_sync(struct bio *bio)
|
|||
bio_put(bio);
|
||||
}
|
||||
|
||||
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
|
||||
struct writeback_control *wbc)
|
||||
static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
|
||||
struct buffer_head *bh, struct writeback_control *wbc)
|
||||
{
|
||||
struct bio *bio;
|
||||
|
||||
|
@@ -3040,7 +3040,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int submit_bh(int op, int op_flags, struct buffer_head *bh)
|
||||
int submit_bh(enum req_op op, blk_opf_t op_flags, struct buffer_head *bh)
|
||||
{
|
||||
return submit_bh_wbc(op, op_flags, bh, NULL);
|
||||
}
|
||||
|
@@ -3072,7 +3072,8 @@ EXPORT_SYMBOL(submit_bh);
|
|||
* All of the buffers must be for the same device, and must also be a
|
||||
* multiple of the current approved size for the device.
|
||||
*/
|
||||
void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
|
||||
void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
|
||||
struct buffer_head *bhs[])
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@@ -3081,7 +3082,7 @@ void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
|
|||
|
||||
if (!trylock_buffer(bh))
|
||||
continue;
|
||||
if (op == WRITE) {
|
||||
if (op == REQ_OP_WRITE) {
|
||||
if (test_clear_buffer_dirty(bh)) {
|
||||
bh->b_end_io = end_buffer_write_sync;
|
||||
get_bh(bh);
|
||||
|
@@ -3101,7 +3102,7 @@ void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
|
|||
}
|
||||
EXPORT_SYMBOL(ll_rw_block);
|
||||
|
||||
void write_dirty_buffer(struct buffer_head *bh, int op_flags)
|
||||
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
|
||||
{
|
||||
lock_buffer(bh);
|
||||
if (!test_clear_buffer_dirty(bh)) {
|
||||
|
@@ -3119,7 +3120,7 @@ EXPORT_SYMBOL(write_dirty_buffer);
|
|||
* and then start new I/O and then wait upon it. The caller must have a ref on
|
||||
* the buffer_head.
|
||||
*/
|
||||
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
|
||||
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
|
|
|
@@ -9,6 +9,7 @@
|
|||
#define _LINUX_BUFFER_HEAD_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/blk_types.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/pagemap.h>
|
||||
|
@@ -201,11 +202,11 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
|
|||
void free_buffer_head(struct buffer_head * bh);
|
||||
void unlock_buffer(struct buffer_head *bh);
|
||||
void __lock_buffer(struct buffer_head *bh);
|
||||
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
|
||||
void ll_rw_block(enum req_op, blk_opf_t, int, struct buffer_head * bh[]);
|
||||
int sync_dirty_buffer(struct buffer_head *bh);
|
||||
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
|
||||
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
|
||||
int submit_bh(int, int, struct buffer_head *);
|
||||
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
|
||||
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
|
||||
int submit_bh(enum req_op, blk_opf_t, struct buffer_head *);
|
||||
void write_boundary_block(struct block_device *bdev,
|
||||
sector_t bblock, unsigned blocksize);
|
||||
int bh_uptodate_or_lock(struct buffer_head *bh);
|
||||
|
|
Loading…
Reference in New Issue