Merge tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache

Three folio-related fixes for 5.18.

Pull folio fixes from Matthew Wilcox:
 "Fewer bug reports than I was expecting from enabling large folios.
  One that doesn't show up on x86 but does on arm64, one that shows up
  with hugetlbfs memory failure testing and one that shows up with page
  migration, which it turns out I wasn't testing because my last NUMA
  machine died. Need to set up a qemu fake NUMA machine so I don't skip
  testing that in future.

  Summary:

   - Remove the migration code's assumptions about large pages being
     PMD sized

   - Don't call pmd_page() on a non-leaf PMD

   - Fix handling of hugetlbfs pages in page_vma_mapped_walk"

* tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache:
  mm/rmap: Fix handling of hugetlbfs pages in page_vma_mapped_walk
  mm/mempolicy: Use vma_alloc_folio() in new_page()
  mm: Add vma_alloc_folio()
  mm/migrate: Use a folio in migrate_misplaced_transhuge_page()
  mm/migrate: Use a folio in alloc_migration_target()
  mm/huge_memory: Avoid calling pmd_page() on a non-leaf PMD
commit d66b6985b1
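Before the diff, a quick illustration of the allocation helper this series adds. The function below is a hypothetical caller (alloc_dst_folio is not a kernel symbol); it mirrors the new_page() conversion in the mm/mempolicy.c hunk further down and shows how vma_alloc_folio() replaces the old alloc_page_vma()/alloc_hugepage_vma() + prep_transhuge_page() pattern without assuming the large page is PMD sized.

/*
 * Illustrative only: allocate a destination folio sized to match a source
 * folio, letting vma_alloc_folio() apply the VMA's NUMA policy.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct folio *alloc_dst_folio(struct folio *src,
                                     struct vm_area_struct *vma,
                                     unsigned long addr)
{
        gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

        if (folio_test_large(src))
                gfp = GFP_TRANSHUGE;

        /*
         * For order > 1 the helper calls prep_transhuge_page() itself, so
         * the caller no longer needs the alloc + prep two-step.
         */
        return vma_alloc_folio(gfp, folio_order(src), vma, addr,
                               folio_test_large(src));
}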
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -613,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                struct vm_area_struct *vma, unsigned long addr,
                bool hugepage);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage);
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
@@ -627,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
 {
        return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
        alloc_pages(gfp_mask, order)
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+       folio_alloc(gfp, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages(gfp_mask, order)
 #endif
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2145,15 +2145,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         * pmd against. Otherwise we can end up replacing wrong folio.
         */
        VM_BUG_ON(freeze && !folio);
-       if (folio) {
-               VM_WARN_ON_ONCE(!folio_test_locked(folio));
-               if (folio != page_folio(pmd_page(*pmd)))
-                       goto out;
-       }
+       VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
 
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-           is_pmd_migration_entry(*pmd))
+           is_pmd_migration_entry(*pmd)) {
+               if (folio && folio != page_folio(pmd_page(*pmd)))
+                       goto out;
                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
+       }
 
 out:
        spin_unlock(ptl);
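A note on the mm/huge_memory.c change above: pmd_page() is only meaningful once the PMD is known to be a leaf (huge, devmap, or a PMD migration entry); on a non-leaf PMD it resolves to the page-table page rather than the folio being split, which is presumably the arm64-only report mentioned in the pull message. The helper below is hypothetical (pmd_folio_if_leaf is not a kernel symbol) and simply restates the ordering the fix enforces.

/*
 * Hypothetical helper restating the ordering enforced by the hunk above:
 * only translate *pmd to a folio after it is known to be a leaf entry;
 * otherwise it points at a page table, not at user data.
 */
#include <linux/pgtable.h>
#include <linux/swapops.h>
#include <linux/mm.h>

static struct folio *pmd_folio_if_leaf(pmd_t *pmd)
{
        if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) &&
            !is_pmd_migration_entry(*pmd))
                return NULL;    /* non-leaf: do not call pmd_page() */

        return page_folio(pmd_page(*pmd));
}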
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+       struct folio *dst, *src = page_folio(page);
        struct vm_area_struct *vma;
        unsigned long address;
+       gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
        vma = find_vma(current->mm, start);
        while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
                vma = vma->vm_next;
        }
 
-       if (PageHuge(page)) {
-               return alloc_huge_page_vma(page_hstate(compound_head(page)),
+       if (folio_test_hugetlb(src))
+               return alloc_huge_page_vma(page_hstate(&src->page),
                                vma, address);
-       } else if (PageTransHuge(page)) {
-               struct page *thp;
-
-               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-                                        HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
+
+       if (folio_test_large(src))
+               gfp = GFP_TRANSHUGE;
+
        /*
-        * if !vma, alloc_page_vma() will use task or system default policy
+        * if !vma, vma_alloc_folio() will use task or system default policy
         */
-       return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-                       vma, address);
+       dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+                       folio_test_large(src));
+       return &dst->page;
 }
 #else
 
@@ -2227,6 +2224,19 @@ out:
 }
 EXPORT_SYMBOL(alloc_pages_vma);
 
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage)
+{
+       struct folio *folio;
+
+       folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+                       hugepage);
+       if (folio && order > 1)
+               prep_transhuge_page(&folio->page);
+
+       return folio;
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+       struct folio *folio = page_folio(page);
        struct migration_target_control *mtc;
        gfp_t gfp_mask;
        unsigned int order = 0;
-       struct page *new_page = NULL;
+       struct folio *new_folio = NULL;
        int nid;
        int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
        gfp_mask = mtc->gfp_mask;
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
-               nid = page_to_nid(page);
+               nid = folio_nid(folio);
 
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(compound_head(page));
+       if (folio_test_hugetlb(folio)) {
+               struct hstate *h = page_hstate(&folio->page);
 
                gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
                return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
        }
 
-       if (PageTransHuge(page)) {
+       if (folio_test_large(folio)) {
                /*
                 * clear __GFP_RECLAIM to make the migration callback
                 * consistent with regular THP allocations.
                 */
                gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
-               order = HPAGE_PMD_ORDER;
+               order = folio_order(folio);
        }
-       zidx = zone_idx(page_zone(page));
+       zidx = zone_idx(folio_zone(folio));
        if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-       if (new_page && PageTransHuge(new_page))
-               prep_transhuge_page(new_page);
+       new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-       return new_page;
+       return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
@@ -1999,32 +1997,20 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                           unsigned long data)
 {
        int nid = (int) data;
-       struct page *newpage;
+       int order = compound_order(page);
+       gfp_t gfp = __GFP_THISNODE;
+       struct folio *new;
 
-       newpage = __alloc_pages_node(nid,
-                                        (GFP_HIGHUSER_MOVABLE |
-                                         __GFP_THISNODE | __GFP_NOMEMALLOC |
-                                         __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~__GFP_RECLAIM, 0);
+       if (order > 0)
+               gfp |= GFP_TRANSHUGE_LIGHT;
+       else {
+               gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
+                       __GFP_NOWARN;
+               gfp &= ~__GFP_RECLAIM;
+       }
+       new = __folio_alloc_node(gfp, order, nid);
 
-       return newpage;
-}
-
-static struct page *alloc_misplaced_dst_page_thp(struct page *page,
-                                                unsigned long data)
-{
-       int nid = (int) data;
-       struct page *newpage;
-
-       newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-                                  HPAGE_PMD_ORDER);
-       if (!newpage)
-               goto out;
-
-       prep_transhuge_page(newpage);
-
-out:
-       return newpage;
+       return &new->page;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2082,22 +2068,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        int nr_remaining;
        unsigned int nr_succeeded;
        LIST_HEAD(migratepages);
-       new_page_t *new;
-       bool compound;
        int nr_pages = thp_nr_pages(page);
 
-       /*
-        * PTE mapped THP or HugeTLB page can't reach here so the page could
-        * be either base page or THP. And it must be head page if it is
-        * THP.
-        */
-       compound = PageTransHuge(page);
-
-       if (compound)
-               new = alloc_misplaced_dst_page_thp;
-       else
-               new = alloc_misplaced_dst_page;
-
        /*
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
@@ -2118,9 +2090,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                goto out;
 
        list_add(&page->lru, &migratepages);
-       nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
-                                    MIGRATE_ASYNC, MR_NUMA_MISPLACED,
-                                    &nr_succeeded);
+       nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+                                    NULL, node, MIGRATE_ASYNC,
+                                    MR_NUMA_MISPLACED, &nr_succeeded);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
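To place the alloc_migration_target() changes in context, here is a hedged sketch of how the function is usually invoked; the wrapper and its name (migrate_list_somewhere) and the mode/reason choices are illustrative, not code from this merge. The control block travels through migrate_pages()'s private argument, and with .nid set to NUMA_NO_NODE the callback falls back to folio_nid(), as in the hunk above.

/*
 * Illustrative caller of alloc_migration_target().  Note that
 * struct migration_target_control and alloc_migration_target() are declared
 * in mm/internal.h, so a sketch like this only builds inside mm/.
 */
#include <linux/migrate.h>
#include <linux/gfp.h>
#include "internal.h"

static int migrate_list_somewhere(struct list_head *pagelist)
{
        struct migration_target_control mtc = {
                .nid = NUMA_NO_NODE,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
        };
        unsigned int nr_succeeded = 0;

        /* Returns the number of pages not migrated, or an error code. */
        return migrate_pages(pagelist, alloc_migration_target, NULL,
                             (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
                             &nr_succeeded);
}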
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -163,7 +163,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                return not_found(pvmw);
 
        if (unlikely(is_vm_hugetlb_page(vma))) {
-               unsigned long size = pvmw->nr_pages * PAGE_SIZE;
+               struct hstate *hstate = hstate_vma(vma);
+               unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);
@@ -173,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                if (!pvmw->pte)
                        return false;
 
-               pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
-                                               pvmw->pte);
+               pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
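Finally, for the mm/page_vma_mapped.c hunks: the walked range can be much smaller than the huge page (the pull message mentions hugetlbfs memory-failure testing, which targets individual subpages), so deriving the hstate from the range size can fail, while the VMA always knows its hstate. A hypothetical helper (hugetlb_walk_lockptr is not a kernel symbol) restating the fixed logic:

/*
 * Hypothetical helper: take the hugetlb locking granularity from the VMA,
 * never from the length of the range being walked.  With a 2MB hugetlb VMA
 * and a single 4KB subpage under walk, pvmw->nr_pages * PAGE_SIZE would be
 * 4KB and size_to_hstate(4KB) finds no matching hstate, whereas
 * hstate_vma(vma) yields the 2MB hstate.
 */
#include <linux/hugetlb.h>

static spinlock_t *hugetlb_walk_lockptr(struct vm_area_struct *vma,
                                        struct mm_struct *mm, pte_t *pte)
{
        struct hstate *hstate = hstate_vma(vma);

        return huge_pte_lockptr(hstate, mm, pte);
}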