fs: Convert __set_page_dirty_no_writeback to noop_dirty_folio
This is a mechanical change.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs

commit 46de8b9794
parent e621900ad2
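
Every hunk that follows applies the same substitution: the .set_page_dirty entry pointing at __set_page_dirty_no_writeback is dropped and .dirty_folio = noop_dirty_folio is installed in its place. As a minimal sketch of the resulting shape for an address space that never writes back dirty data (the example_aops and example_writepages names are hypothetical, not part of this patch):

/* Illustrative sketch only: the example_* names are hypothetical. */
static const struct address_space_operations example_aops = {
        .writepages  = example_writepages,  /* hypothetical writeback hook */
        .direct_IO   = noop_direct_IO,      /* as in the ext2/ext4/fuse/xfs tables below */
        .dirty_folio = noop_dirty_folio,    /* mark the folio dirty, never write it back */
};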

drivers/dax/device.c
@@ -346,7 +346,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
 }
 
 static const struct address_space_operations dev_dax_aops = {
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 };
 
 static int dax_open(struct inode *inode, struct file *filp)

fs/aio.c
@@ -478,7 +478,7 @@ out:
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 #if IS_ENABLED(CONFIG_MIGRATION)
         .migratepage = aio_migratepage,
 #endif

fs/ext2/inode.c
@@ -1000,7 +1000,7 @@ const struct address_space_operations ext2_nobh_aops = {
 static const struct address_space_operations ext2_dax_aops = {
         .writepages = ext2_dax_writepages,
         .direct_IO = noop_direct_IO,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 };
 
 /*

fs/ext4/inode.c
@@ -3630,7 +3630,7 @@ static const struct address_space_operations ext4_da_aops = {
 static const struct address_space_operations ext4_dax_aops = {
         .writepages = ext4_dax_writepages,
         .direct_IO = noop_direct_IO,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
         .bmap = ext4_bmap,
         .swap_activate = ext4_iomap_swap_activate,
 };

fs/fuse/dax.c
@@ -1326,7 +1326,7 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
 static const struct address_space_operations fuse_dax_file_aops = {
         .writepages = fuse_dax_writepages,
         .direct_IO = noop_direct_IO,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 };
 
 static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)

fs/hugetlbfs/inode.c
@@ -1144,7 +1144,7 @@ static void hugetlbfs_destroy_inode(struct inode *inode)
 static const struct address_space_operations hugetlbfs_aops = {
         .write_begin = hugetlbfs_write_begin,
         .write_end = hugetlbfs_write_end,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
         .migratepage = hugetlbfs_migrate_page,
         .error_remove_page = hugetlbfs_error_remove_page,
 };

fs/libfs.c
@@ -631,7 +631,7 @@ const struct address_space_operations ram_aops = {
         .readpage = simple_readpage,
         .write_begin = simple_write_begin,
         .write_end = simple_write_end,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 };
 EXPORT_SYMBOL(ram_aops);
 
@@ -1220,7 +1220,7 @@ EXPORT_SYMBOL(kfree_link);
 struct inode *alloc_anon_inode(struct super_block *s)
 {
         static const struct address_space_operations anon_aops = {
-                .set_page_dirty = __set_page_dirty_no_writeback,
+                .dirty_folio = noop_dirty_folio,
         };
         struct inode *inode = new_inode_pseudo(s);
 

fs/xfs/xfs_aops.c
@@ -581,6 +581,6 @@ const struct address_space_operations xfs_address_space_operations = {
 const struct address_space_operations xfs_dax_aops = {
         .writepages = xfs_dax_writepages,
         .direct_IO = noop_direct_IO,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
         .swap_activate = xfs_iomap_swapfile_activate,
 };

include/linux/pagemap.h
@@ -919,7 +919,7 @@ static inline int __must_check write_one_page(struct page *page)
 }
 
 int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
 
 void page_endio(struct page *page, bool is_write, int err);
 

mm/page-writeback.c
@@ -2430,13 +2430,13 @@ EXPORT_SYMBOL(folio_write_one);
 /*
  * For address_spaces which do not use buffers nor write back.
  */
-int __set_page_dirty_no_writeback(struct page *page)
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-        if (!PageDirty(page))
-                return !TestSetPageDirty(page);
-        return 0;
+        if (!folio_test_dirty(folio))
+                return !folio_test_set_dirty(folio);
+        return false;
 }
-EXPORT_SYMBOL(__set_page_dirty_no_writeback);
+EXPORT_SYMBOL(noop_dirty_folio);
 
 /*
  * Helper function for set_page_dirty family.

mm/page_io.c
@@ -453,6 +453,6 @@ bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
                         return aops->dirty_folio(mapping, folio);
                 return aops->set_page_dirty(&folio->page);
         } else {
-                return __set_page_dirty_no_writeback(&folio->page);
+                return noop_dirty_folio(mapping, folio);
         }
 }

mm/secretmem.c
@@ -152,7 +152,7 @@ static void secretmem_freepage(struct page *page)
 }
 
 const struct address_space_operations secretmem_aops = {
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
         .freepage = secretmem_freepage,
         .migratepage = secretmem_migratepage,
         .isolate_page = secretmem_isolate_page,

mm/shmem.c
@@ -3753,7 +3753,7 @@ static int shmem_error_remove_page(struct address_space *mapping,
 
 const struct address_space_operations shmem_aops = {
         .writepage = shmem_writepage,
-        .set_page_dirty = __set_page_dirty_no_writeback,
+        .dirty_folio = noop_dirty_folio,
 #ifdef CONFIG_TMPFS
         .write_begin = shmem_write_begin,
         .write_end = shmem_write_end,