shmem: convert shmem_getpage_gfp() to shmem_get_folio_gfp()
Add a shmem_getpage_gfp() wrapper for compatibility with current users.

Link: https://lkml.kernel.org/r/20220902194653.1739778-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5739a81cf8
commit fc26babbc7

 mm/shmem.c | 66 +++++++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 39 insertions(+), 27 deletions(-)
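The shape of this patch is the one used throughout the folio conversion series: the core routine is converted to traffic in folios, and a thin wrapper keeps the old page-based entry point alive until the remaining callers are converted. For orientation, here is a minimal, self-contained C sketch of that wrapper pattern; the types, names, and stub logic are hypothetical stand-ins, not kernel code:

	#include <stdio.h>

	/* Hypothetical stand-ins for struct page / struct folio. */
	struct page { int data; };
	struct folio { struct page page; };

	/* Core routine now produces the new type (a folio)... */
	static int get_folio(struct folio **foliop)
	{
		static struct folio f = { .page = { .data = 42 } };
		*foliop = &f;
		return 0;
	}

	/* ...and a thin compatibility wrapper keeps the old page-based
	 * signature for unconverted callers, translating folio -> page. */
	static int get_page_compat(struct page **pagep)
	{
		struct folio *folio = NULL;
		int ret = get_folio(&folio);

		*pagep = folio ? &folio->page : NULL;
		return ret;
	}

	int main(void)
	{
		struct page *page;

		if (get_page_compat(&page) == 0)
			printf("%d\n", page->data);	/* prints 42 */
		return 0;
	}

Old callers compile and behave unchanged; only the core routine and any converted callers need to know about the new type.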
@@ -139,17 +139,6 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type);
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
-		struct page **pagep, enum sgp_type sgp,
-		gfp_t gfp, struct vm_area_struct *vma,
-		struct vm_fault *vmf, vm_fault_t *fault_type);
-
-int shmem_getpage(struct inode *inode, pgoff_t index,
-		struct page **pagep, enum sgp_type sgp)
-{
-	return shmem_getpage_gfp(inode, index, pagep, sgp,
-		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
-}
 
 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
 {
@@ -1595,7 +1584,7 @@ failed:
 
 /*
  * When a page is moved from swapcache to shmem filecache (either by the
- * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
  * shmem_unuse_inode()), it may have been read in earlier from swap, in
  * ignorance of the mapping it belongs to.  If that mapping has special
  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
@@ -1812,7 +1801,7 @@ unlock:
 }
 
 /*
- * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
+ * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
  *
  * If we allocate a new one we do not mark it dirty. That's up to the
  * vm. If we swap it in we mark it dirty since we also free the swap
@@ -1821,8 +1810,8 @@ unlock:
  * vma, vmf, and fault_type are only supplied by shmem_fault:
  * otherwise they are NULL.
  */
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
-		struct page **pagep, enum sgp_type sgp, gfp_t gfp,
+static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
+		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
 		struct vm_area_struct *vma, struct vm_fault *vmf,
 		vm_fault_t *fault_type)
 {
@@ -1864,7 +1853,7 @@ repeat:
 		if (error == -EEXIST)
 			goto repeat;
 
-		*pagep = &folio->page;
+		*foliop = folio;
 		return error;
 	}
 
@@ -1874,7 +1863,7 @@ repeat:
 		folio_mark_accessed(folio);
 		if (folio_test_uptodate(folio))
 			goto out;
-		/* fallocated page */
+		/* fallocated folio */
 		if (sgp != SGP_READ)
 			goto clear;
 		folio_unlock(folio);
@@ -1882,10 +1871,10 @@ repeat:
 	}
 
 	/*
-	 * SGP_READ: succeed on hole, with NULL page, letting caller zero.
-	 * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
+	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
+	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
 	 */
-	*pagep = NULL;
+	*foliop = NULL;
 	if (sgp == SGP_READ)
 		return 0;
 	if (sgp == SGP_NOALLOC)
@@ -1918,7 +1907,7 @@ alloc_nohuge:
 		if (error != -ENOSPC)
 			goto unlock;
 		/*
-		 * Try to reclaim some space by splitting a huge page
+		 * Try to reclaim some space by splitting a large folio
 		 * beyond i_size on the filesystem.
 		 */
 		while (retry--) {
@@ -1954,9 +1943,9 @@ alloc_nohuge:
 
 	if (folio_test_pmd_mappable(folio) &&
 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
-			hindex + HPAGE_PMD_NR - 1) {
+			folio_next_index(folio) - 1) {
 		/*
-		 * Part of the huge page is beyond i_size: subject
+		 * Part of the large folio is beyond i_size: subject
 		 * to shrink under memory pressure.
 		 */
 		spin_lock(&sbinfo->shrinklist_lock);
@@ -1973,14 +1962,14 @@ alloc_nohuge:
 	}
 
 	/*
-	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
 	 */
 	if (sgp == SGP_FALLOC)
 		sgp = SGP_WRITE;
 clear:
 	/*
-	 * Let SGP_WRITE caller clear ends if write does not fill page;
-	 * but SGP_FALLOC on a page fallocated earlier must initialize
+	 * Let SGP_WRITE caller clear ends if write does not fill folio;
+	 * but SGP_FALLOC on a folio fallocated earlier must initialize
 	 * it now, lest undo on failure cancel our earlier guarantee.
 	 */
 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
@@ -2006,7 +1995,7 @@ clear:
 		goto unlock;
 	}
 out:
-	*pagep = folio_page(folio, index - hindex);
+	*foliop = folio;
 	return 0;
 
 	/*
@@ -2036,6 +2025,29 @@ unlock:
 	return error;
 }
 
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
+		struct page **pagep, enum sgp_type sgp,
+		gfp_t gfp, struct vm_area_struct *vma,
+		struct vm_fault *vmf, vm_fault_t *fault_type)
+{
+	struct folio *folio = NULL;
+	int ret = shmem_get_folio_gfp(inode, index, &folio, sgp, gfp, vma,
+			vmf, fault_type);
+
+	if (folio)
+		*pagep = folio_file_page(folio, index);
+	else
+		*pagep = NULL;
+	return ret;
+}
+
+int shmem_getpage(struct inode *inode, pgoff_t index,
+		struct page **pagep, enum sgp_type sgp)
+{
+	return shmem_getpage_gfp(inode, index, pagep, sgp,
+		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
+}
+
 /*
  * This is like autoremove_wake_function, but it removes the wait queue
  * entry unconditionally - even if something else had already woken the
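The wrapper's one subtlety is translating the caller's file index back to the right page inside a possibly-large folio: folio_file_page(folio, index) replaces the old folio_page(folio, index - hindex) arithmetic. A rough userspace model of that lookup, assuming the helper masks the index by the folio's power-of-two page count (an assumption about its implementation, not a quote of it), is:

	#include <assert.h>
	#include <stddef.h>

	/* Model: a "folio" spans nr_pages consecutive pages, nr_pages a
	 * power of two, so the page holding file index `index` is found
	 * by masking off the high bits. Hypothetical, not the kernel helper. */
	static size_t page_in_folio(size_t index, size_t nr_pages)
	{
		return index & (nr_pages - 1);
	}

	int main(void)
	{
		/* A 512-page (PMD-sized on x86-64) folio covering indices 512..1023: */
		assert(page_in_folio(517, 512) == 5);
		/* An order-0 folio always yields page 0: */
		assert(page_in_folio(517, 1) == 0);
		return 0;
	}

Either way, shmem_getpage() callers are untouched by this commit; they keep passing a struct page ** and go through the new wrapper.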