mm/writeback: Add folio_wait_writeback()
wait_on_page_writeback_killable() only has one caller, so convert it to call folio_wait_writeback_killable(). For the wait_on_page_writeback() callers, add a compatibility wrapper around folio_wait_writeback(). Turning PageWriteback() into folio_test_writeback() eliminates a call to compound_head() which saves 8 bytes and 15 bytes in the two functions. Unfortunately, that is more than offset by adding the wait_on_page_writeback compatibility wrapper for a net increase in text of 7 bytes. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Acked-by: Jeff Layton <jlayton@kernel.org> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: William Kucharski <william.kucharski@oracle.com> Acked-by: Mike Rapoport <rppt@linux.ibm.com> Reviewed-by: David Howells <dhowells@redhat.com>
This commit is contained in:
parent
4268b48077
commit
490e016f22
|
@ -861,7 +861,8 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
|
|||
*/
|
||||
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
|
||||
{
|
||||
struct page *page = thp_head(vmf->page);
|
||||
struct folio *folio = page_folio(vmf->page);
|
||||
struct page *page = &folio->page;
|
||||
struct file *file = vmf->vma->vm_file;
|
||||
struct inode *inode = file_inode(file);
|
||||
struct afs_vnode *vnode = AFS_FS_I(inode);
|
||||
|
@ -884,7 +885,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
|
|||
goto out;
|
||||
#endif
|
||||
|
||||
if (wait_on_page_writeback_killable(page))
|
||||
if (folio_wait_writeback_killable(folio))
|
||||
goto out;
|
||||
|
||||
if (lock_page_killable(page) < 0)
|
||||
|
@ -894,8 +895,8 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
|
|||
* details the portion of the page we need to write back and we might
|
||||
* need to redirty the page if there's a problem.
|
||||
*/
|
||||
if (wait_on_page_writeback_killable(page) < 0) {
|
||||
unlock_page(page);
|
||||
if (folio_wait_writeback_killable(folio) < 0) {
|
||||
folio_unlock(folio);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
|
|
@ -766,7 +766,8 @@ static inline int wait_on_page_locked_killable(struct page *page)
|
|||
|
||||
int put_and_wait_on_page_locked(struct page *page, int state);
|
||||
void wait_on_page_writeback(struct page *page);
|
||||
int wait_on_page_writeback_killable(struct page *page);
|
||||
void folio_wait_writeback(struct folio *folio);
|
||||
int folio_wait_writeback_killable(struct folio *folio);
|
||||
void end_page_writeback(struct page *page);
|
||||
void folio_end_writeback(struct folio *folio);
|
||||
void wait_for_stable_page(struct page *page);
|
||||
|
|
|
@ -23,3 +23,9 @@ void end_page_writeback(struct page *page)
|
|||
return folio_end_writeback(page_folio(page));
|
||||
}
|
||||
EXPORT_SYMBOL(end_page_writeback);
|
||||
|
||||
/**
 * wait_on_page_writeback - Wait for a page to finish writeback.
 * @page: The page to wait for.
 *
 * Compatibility wrapper for callers that still operate on struct page;
 * converts to the enclosing folio and waits via folio_wait_writeback().
 */
void wait_on_page_writeback(struct page *page)
{
	/* A void function must not return an expression in strict C;
	 * just call the folio variant directly.
	 */
	folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);
|
||||
|
|
|
@ -2873,33 +2873,51 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
|
|||
}
|
||||
EXPORT_SYMBOL(__test_set_page_writeback);
|
||||
|
||||
/*
|
||||
* Wait for a page to complete writeback
|
||||
/**
|
||||
* folio_wait_writeback - Wait for a folio to finish writeback.
|
||||
* @folio: The folio to wait for.
|
||||
*
|
||||
* If the folio is currently being written back to storage, wait for the
|
||||
* I/O to complete.
|
||||
*
|
||||
* Context: Sleeps. Must be called in process context and with
|
||||
* no spinlocks held. Caller should hold a reference on the folio.
|
||||
* If the folio is not locked, writeback may start again after writeback
|
||||
* has finished.
|
||||
*/
|
||||
void wait_on_page_writeback(struct page *page)
|
||||
void folio_wait_writeback(struct folio *folio)
|
||||
{
|
||||
while (PageWriteback(page)) {
|
||||
trace_wait_on_page_writeback(page, page_mapping(page));
|
||||
wait_on_page_bit(page, PG_writeback);
|
||||
while (folio_test_writeback(folio)) {
|
||||
trace_wait_on_page_writeback(&folio->page, folio_mapping(folio));
|
||||
wait_on_page_bit(&folio->page, PG_writeback);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(wait_on_page_writeback);
|
||||
EXPORT_SYMBOL_GPL(folio_wait_writeback);
|
||||
|
||||
/*
|
||||
* Wait for a page to complete writeback. Returns -EINTR if we get a
|
||||
* fatal signal while waiting.
|
||||
/**
|
||||
* folio_wait_writeback_killable - Wait for a folio to finish writeback.
|
||||
* @folio: The folio to wait for.
|
||||
*
|
||||
* If the folio is currently being written back to storage, wait for the
|
||||
* I/O to complete or a fatal signal to arrive.
|
||||
*
|
||||
* Context: Sleeps. Must be called in process context and with
|
||||
* no spinlocks held. Caller should hold a reference on the folio.
|
||||
* If the folio is not locked, writeback may start again after writeback
|
||||
* has finished.
|
||||
* Return: 0 on success, -EINTR if we get a fatal signal while waiting.
|
||||
*/
|
||||
int wait_on_page_writeback_killable(struct page *page)
|
||||
int folio_wait_writeback_killable(struct folio *folio)
|
||||
{
|
||||
while (PageWriteback(page)) {
|
||||
trace_wait_on_page_writeback(page, page_mapping(page));
|
||||
if (wait_on_page_bit_killable(page, PG_writeback))
|
||||
while (folio_test_writeback(folio)) {
|
||||
trace_wait_on_page_writeback(&folio->page, folio_mapping(folio));
|
||||
if (wait_on_page_bit_killable(&folio->page, PG_writeback))
|
||||
return -EINTR;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(wait_on_page_writeback_killable);
|
||||
EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
|
||||
|
||||
/**
|
||||
* wait_for_stable_page() - wait for writeback to finish, if necessary.
|
||||
|
|
Loading…
Reference in New Issue