fs: Introduce aops->read_folio
Change all the callers of ->readpage to call ->read_folio in preference,
if it exists.  This is a transitional duplication, and will be removed
by the end of the series.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
commit 5efe7448a1
parent 6c2ae0d5db
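Every call site below follows the same transitional pattern: try ->read_folio
first and fall back to ->readpage. As a minimal sketch of that dispatch,
mirroring the filemap_read_folio() hunk below (the helper name
read_one_folio() is illustrative, not something this patch adds):

    /* Prefer the new aops method; fall back to the old one.
     * Illustrative only -- this helper is not part of the patch. */
    static int read_one_folio(struct file *file, struct folio *folio)
    {
            struct address_space *mapping = folio->mapping;

            if (mapping->a_ops->read_folio)
                    return mapping->a_ops->read_folio(file, folio);
            return mapping->a_ops->readpage(file, &folio->page);
    }

The same two-way dispatch appears open-coded in filemap_read_folio(),
nobh_truncate_page(), the read_cache_page() filler path and read_pages().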
fs/btrfs/file.c
@@ -2401,7 +2401,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = filp->f_mapping;
 
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
 		return -ENOEXEC;
 
 	file_accessed(filp);
fs/buffer.c
@@ -2824,7 +2824,10 @@ int nobh_truncate_page(struct address_space *mapping,
 
 	/* Ok, it's mapped. Make sure it's up-to-date */
 	if (!folio_test_uptodate(folio)) {
-		err = mapping->a_ops->readpage(NULL, &folio->page);
+		if (mapping->a_ops->read_folio)
+			err = mapping->a_ops->read_folio(NULL, folio);
+		else
+			err = mapping->a_ops->readpage(NULL, &folio->page);
 		if (err) {
 			folio_put(folio);
 			goto out;
fs/ceph/addr.c
@@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &ceph_vmops;
include/linux/fs.h
@@ -336,6 +336,7 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
 	int (*readpage)(struct file *, struct page *);
+	int (*read_folio)(struct file *, struct folio *);
 
 	/* Write back some dirty pages from this mapping. */
 	int (*writepages)(struct address_space *, struct writeback_control *);
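The new method takes a struct folio rather than a struct page but keeps the
same contract: read the data, then unlock on completion. A hedged sketch of
how a filesystem might wire it up during the transition (examplefs and its
I/O helper are hypothetical, not part of this patch):

    /* Hypothetical conversion; examplefs_do_read() is an assumed
     * fs-internal helper that reads the data and unlocks the folio. */
    static int examplefs_do_read(struct folio *folio);

    static int examplefs_read_folio(struct file *file, struct folio *folio)
    {
            return examplefs_do_read(folio);
    }

    static const struct address_space_operations examplefs_aops = {
            .read_folio = examplefs_read_folio,
            /* ->readpage can be dropped once this lands: every caller
             * updated in this patch checks ->read_folio first. */
    };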
kernel/events/uprobes.c
@@ -790,7 +790,7 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
 	 * see uprobe_register().
 	 */
-	if (mapping->a_ops->readpage)
+	if (mapping->a_ops->read_folio || mapping->a_ops->readpage)
 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
@@ -1143,7 +1143,9 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
 		return -EINVAL;
 
 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
-	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
+	if (!inode->i_mapping->a_ops->read_folio &&
+	    !inode->i_mapping->a_ops->readpage &&
+	    !shmem_mapping(inode->i_mapping))
 		return -EIO;
 	/* Racy, just to catch the obvious mistakes */
 	if (offset > i_size_read(inode))
mm/filemap.c
@@ -2419,7 +2419,10 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
 	 */
 	folio_clear_error(folio);
 	/* Start the actual read. The read will unlock the page. */
-	error = mapping->a_ops->readpage(file, &folio->page);
+	if (mapping->a_ops->read_folio)
+		error = mapping->a_ops->read_folio(file, folio);
+	else
+		error = mapping->a_ops->readpage(file, &folio->page);
 	if (error)
 		return error;
 
@@ -3447,7 +3450,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
@@ -3505,6 +3508,8 @@ repeat:
 filler:
 	if (filler)
 		err = filler(data, &folio->page);
+	else if (mapping->a_ops->read_folio)
+		err = mapping->a_ops->read_folio(data, folio);
 	else
 		err = mapping->a_ops->readpage(data, &folio->page);
 
mm/readahead.c
@@ -15,7 +15,7 @@
  * explicitly requested by the application. Readahead only ever
  * attempts to read folios that are not yet in the page cache. If a
  * folio is present but not up-to-date, readahead will not try to read
- * it. In that case a simple ->readpage() will be requested.
+ * it. In that case a simple ->read_folio() will be requested.
  *
  * Readahead is triggered when an application read request (whether a
  * system call or a page fault) finds that the requested folio is not in
@@ -78,7 +78,7 @@
  * address space operation, for which mpage_readahead() is a canonical
  * implementation. ->readahead() should normally initiate reads on all
  * folios, but may fail to read any or all folios without causing an I/O
- * error. The page cache reading code will issue a ->readpage() request
+ * error. The page cache reading code will issue a ->read_folio() request
  * for any folio which ->readahead() did not read, and only an error
  * from this will be final.
 *
@@ -110,7 +110,7 @@
  * were not fetched with readahead_folio(). This will allow a
  * subsequent synchronous readahead request to try them again. If they
  * are left in the page cache, then they will be read individually using
- * ->readpage() which may be less efficient.
+ * ->read_folio() which may be less efficient.
  */
 
 #include <linux/kernel.h>
@@ -170,8 +170,11 @@ static void read_pages(struct readahead_control *rac)
 			}
 			folio_unlock(folio);
 		}
+	} else if (aops->read_folio) {
+		while ((folio = readahead_folio(rac)) != NULL)
+			aops->read_folio(rac->file, folio);
 	} else {
-		while ((folio = readahead_folio(rac)))
+		while ((folio = readahead_folio(rac)) != NULL)
 			aops->readpage(rac->file, &folio->page);
 	}
 
@@ -302,7 +305,8 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
 
-	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead))
+	if (unlikely(!mapping->a_ops->read_folio &&
+			!mapping->a_ops->readpage && !mapping->a_ops->readahead))
 		return;
 
 	/*
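Together the comment updates and the read_pages() hunk spell out the fallback
contract: any folio that ->readahead() does not read is later read
individually through ->read_folio() (or ->readpage() while both exist). A
sketch of a ->readahead() implementation leaning on that, with hypothetical
names and assuming the readahead_folio() API used in the hunk above:

    /* Hypothetical ->readahead(); examplefs_start_async_read() is an
     * assumed helper that queues I/O and unlocks the folio when the
     * read completes. */
    static void examplefs_readahead(struct readahead_control *rac)
    {
            struct folio *folio;

            while ((folio = readahead_folio(rac)) != NULL) {
                    if (examplefs_start_async_read(folio) < 0)
                            /* Unlock without marking uptodate; the core
                             * will fall back to ->read_folio() for it. */
                            folio_unlock(folio);
            }
    }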
mm/swapfile.c
@@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	/*
 	 * Read the swap header.
 	 */
-	if (!mapping->a_ops->readpage) {
+	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
 		error = -EINVAL;
 		goto bad_swap_unlock_inode;
 	}