mm/shmem: convert shmem_alloc_and_acct_page to use a folio

Convert shmem_alloc_hugepage() to return the folio that it uses and use a
folio throughout shmem_alloc_and_acct_page().  Continue to return a page
from shmem_alloc_and_acct_page() for now.

Link: https://lkml.kernel.org/r/20220504182857.4013401-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2022-05-12 20:23:04 -07:00 committed by Andrew Morton
parent 0c023ef52d
commit 72827e5c2b
1 changed file with 9 additions and 9 deletions

View File

@@ -1523,7 +1523,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
return result; return result;
} }
static struct page *shmem_alloc_hugepage(gfp_t gfp, static struct folio *shmem_alloc_hugefolio(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index) struct shmem_inode_info *info, pgoff_t index)
{ {
struct vm_area_struct pvma; struct vm_area_struct pvma;
@@ -1541,7 +1541,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_destroy(&pvma); shmem_pseudo_vma_destroy(&pvma);
if (!folio) if (!folio)
count_vm_event(THP_FILE_FALLBACK); count_vm_event(THP_FILE_FALLBACK);
return &folio->page; return folio;
} }
static struct folio *shmem_alloc_folio(gfp_t gfp, static struct folio *shmem_alloc_folio(gfp_t gfp,
@@ -1568,7 +1568,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
pgoff_t index, bool huge) pgoff_t index, bool huge)
{ {
struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_inode_info *info = SHMEM_I(inode);
struct page *page; struct folio *folio;
int nr; int nr;
int err = -ENOSPC; int err = -ENOSPC;
@@ -1580,13 +1580,13 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
goto failed; goto failed;
if (huge) if (huge)
page = shmem_alloc_hugepage(gfp, info, index); folio = shmem_alloc_hugefolio(gfp, info, index);
else else
page = shmem_alloc_page(gfp, info, index); folio = shmem_alloc_folio(gfp, info, index);
if (page) { if (folio) {
__SetPageLocked(page); __folio_set_locked(folio);
__SetPageSwapBacked(page); __folio_set_swapbacked(folio);
return page; return &folio->page;
} }
err = -ENOMEM; err = -ENOMEM;