mm/numa: no task_numa_fault() call if PTE is changed
commit 40b760cfd44566bca791c80e0720d70d75382b84 upstream.

When handling a numa page fault, task_numa_fault() should be called by a
process that restores the page table of the faulted folio to avoid duplicated
stats counting.  Commit b99a342d4f ("NUMA balancing: reduce TLB flush via
delaying mapping on hint page fault") restructured do_numa_page() and did not
avoid the task_numa_fault() call in the second page table check after a numa
migration failure.  Fix it by making all !pte_same() checks return immediately.

This issue can cause task_numa_fault() to be called more often than necessary
and lead to unexpected numa balancing results (it is hard to tell whether the
issue causes a positive or negative performance impact, due to the duplicated
numa fault counting).

Link: https://lkml.kernel.org/r/20240809145906.1513458-2-ziy@nvidia.com
Fixes: b99a342d4f ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault")
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reported-by: "Huang, Ying" <ying.huang@intel.com>
Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel.com/
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Yang Shi <shy828301@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 19b4397c4a (parent c789a78151)
 mm/memory.c | 33
 1 file changed
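Before the diff itself, the following condensed sketch shows the control flow
do_numa_page() ends up with once the patch is applied.  It is a reading aid,
not verbatim kernel source: it reuses only identifiers that appear in the
hunks below, and the locking and PTE-restore details around out_map: are
elided.  The point it illustrates is that every bail-out that finds the PTE
missing or changed now returns 0 without touching the stats, so
task_numa_fault() is reached at most once, and only by the task that either
migrated the page or restored the PTE.

	/* Attempt migration to the node chosen by NUMA balancing. */
	if (migrate_misplaced_page(page, vma, target_nid)) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
		/* We migrated the page, so we do the accounting here. */
		task_numa_fault(last_cpupid, page_nid, 1, flags);
		return 0;
	}

	/* Migration failed: re-take the PTE lock and re-check the entry. */
	flags |= TNF_MIGRATE_FAIL;
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);
	if (unlikely(!vmf->pte))
		return 0;	/* PTE gone: whoever changed it counts the fault */
	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;	/* PTE changed under us: do not double-count */
	}
out_map:
	/* ... restore the PTE under the lock (details elided) ... */
	pte_unmap_unlock(vmf->pte, vmf->ptl);

	/* Only the task that restored the PTE accounts the fault. */
	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, 1, flags);
	return 0;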
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4775,7 +4775,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	spin_lock(vmf->ptl);
 	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		goto out;
+		return 0;
 	}
 
 	/* Get the normal PTE */
@@ -4840,23 +4840,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	if (migrate_misplaced_page(page, vma, target_nid)) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
-	} else {
-		flags |= TNF_MIGRATE_FAIL;
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					       vmf->address, &vmf->ptl);
-		if (unlikely(!vmf->pte))
-			goto out;
-		if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			goto out;
-		}
-		goto out_map;
+		task_numa_fault(last_cpupid, page_nid, 1, flags);
+		return 0;
 	}
 
-out:
-	if (page_nid != NUMA_NO_NODE)
-		task_numa_fault(last_cpupid, page_nid, 1, flags);
-	return 0;
+	flags |= TNF_MIGRATE_FAIL;
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	if (unlikely(!vmf->pte))
+		return 0;
+	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return 0;
+	}
 out_map:
 	/*
 	 * Make it present again, depending on how arch implements
@@ -4870,7 +4866,10 @@ out_map:
 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
 	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	goto out;
+
+	if (page_nid != NUMA_NO_NODE)
+		task_numa_fault(last_cpupid, page_nid, 1, flags);
+	return 0;
 }
 
 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)