Merge branch 'numa-migration-fixes' (fixes from Mel Gorman)
Merge NUMA balancing related fixlets from Mel Gorman:

 "There were a few minor changes so am resending just the two patches
  that are most likely to affect the bug Dave and Sasha saw and marked
  them for stable.

  I'm less confident it will address Sasha's problem because while I
  have not kept up to date, I believe he's also seeing memory
  corruption issues in next from an unknown source.  Still, it would
  be nice to see how they affect trinity testing.

  I'll send the MPOL_MF_LAZY patch separately because it's not urgent"

* emailed patches from Mel Gorman <mgorman@suse.de>:
  mm: numa: Do not mark PTEs pte_numa when splitting huge pages
  mm: migrate: Close race between migration completion and mprotect
commit f9220c239f
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page,
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 		pte_t *pte, entry;
 		BUG_ON(PageCompound(page+i));
+		/*
+		 * Note that pmd_numa is not transferred deliberately
+		 * to avoid any possibility that pte_numa leaks to
+		 * a PROT_NONE VMA by accident.
+		 */
 		entry = mk_pte(page + i, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (!pmd_write(*pmd))
 			entry = pte_wrprotect(entry);
 		if (!pmd_young(*pmd))
 			entry = pte_mkold(entry);
-		if (pmd_numa(*pmd))
-			entry = pte_mknuma(entry);
 		pte = pte_offset_map(&_pmd, haddr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, haddr, pte, entry);
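For context on the new comment: the reason a leaked pte_numa is dangerous in a
PROT_NONE VMA is that the NUMA hinting fault path clears the NUMA bit by
unconditionally restoring _PAGE_PRESENT, without rechecking the VMA's current
protections. A sketch of the generic helper from this era's
include/asm-generic/pgtable.h (quoted from memory; treat the exact flag
handling as illustrative rather than authoritative):

	/* Sketch, per asm-generic/pgtable.h circa v3.17 under
	 * CONFIG_NUMA_BALANCING: clearing _PAGE_NUMA re-establishes
	 * _PAGE_PRESENT unconditionally.  If a pte_numa PTE ever sat
	 * inside a PROT_NONE VMA, the hinting fault that "repairs" it
	 * would make the page accessible again, silently undoing
	 * mprotect(PROT_NONE).
	 */
	static inline pte_t pte_mknonnuma(pte_t pte)
	{
		pteval_t val = pte_val(pte);

		val &= ~_PAGE_NUMA;
		val |= (_PAGE_PRESENT | _PAGE_ACCESSED);
		return __pte(val);
	}

Dropping the pmd_numa transfer at split time costs at most a missed hinting
fault on the new PTEs; the scanner will simply mark them again on its next pass.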
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(*ptep))
 		pte = pte_mksoft_dirty(pte);
+
+	/* Recheck VMA as permissions can change since migration started */
 	if (is_write_migration_entry(entry))
-		pte = pte_mkwrite(pte);
+		pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
 	if (PageHuge(new)) {
 		pte = pte_mkhuge(pte);
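The one-line change closes the race because is_write_migration_entry() reports
permissions captured when the migration entry was installed, while mprotect()
can drop VM_WRITE from the VMA before migration completes. maybe_mkwrite()
re-derives writability from the VMA at completion time. For reference, the
helper as defined in include/linux/mm.h of this era (quoted from memory, so
verify against the tree):

	/* Only set the write bit when the VMA still allows writes, so an
	 * mprotect() that raced with migration cannot be undone when the
	 * migration PTE is replaced by the real one.
	 */
	static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		if (likely(vma->vm_flags & VM_WRITE))
			pte = pte_mkwrite(pte);
		return pte;
	}

The worst case after this fix is a spurious write fault on a VMA that is still
writable, which the fault handler resolves; the old code's worst case was a
writable PTE appearing in a VMA that had just been made read-only.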