mm/mremap: retry if either pte_offset_map_*lock() fails
Make move_ptes() return -EAGAIN if pte_offset_map_lock() of old fails, or
if pte_offset_map_nolock() of new fails, and make move_page_tables()
retry if so.

But that retry does need a pmd_none() check inside, to stop an endless
loop when huge shmem is truncated (thank you to syzbot); and
move_huge_pmd() must tolerate that a page table might have been
allocated there just before (of course it would be more satisfying to
remove the empty page table, but this is not a path worth optimizing).

Link: https://lkml.kernel.org/r/65e5e84a-f04-947-23f2-b97d3462e1e@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
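The shape of the fix, boiled down: move_ptes() now reports -EAGAIN when
either page-table mapping attempt fails, and the caller loops back, but
only after confirming the source pmd is still populated. A minimal
userspace sketch of that control flow (pmd_empty, failures and
sketch_move_ptes() are stand-ins invented for illustration, not kernel
APIs):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool pmd_empty;		/* stand-in: source table freed by truncation */
static int failures = 2;	/* stand-in: first two mapping attempts fail */

/* Like move_ptes() after this patch: 0 on success, -EAGAIN on failure. */
static int sketch_move_ptes(void)
{
	if (failures > 0) {
		failures--;
		return -EAGAIN;	/* a pte_offset_map_*lock() stand-in failed */
	}
	return 0;
}

int main(void)
{
	/* Mirrors the move_page_tables() loop body after this patch. */
again:
	if (pmd_empty) {
		/* The check that stops the endless loop on truncation. */
		puts("pmd_none: nothing to move, skip this extent");
		return 0;
	}
	if (sketch_move_ptes() < 0)
		goto again;
	puts("ptes moved");
	return 0;
}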
commit a5be621ee2
parent 670ddd8cdc
mm/huge_memory.c

@@ -1760,9 +1760,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	/*
 	 * The destination pmd shouldn't be established, free_pgtables()
-	 * should have release it.
+	 * should have released it; but move_page_tables() might have already
+	 * inserted a page table, if racing against shmem/file collapse.
 	 */
-	if (WARN_ON(!pmd_none(*new_pmd))) {
+	if (!pmd_none(*new_pmd)) {
+		VM_BUG_ON(pmd_trans_huge(*new_pmd));
 		return false;
 	}
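To make the tolerated race above concrete, a tiny sketch of
move_huge_pmd()'s new early exit (dst_empty and dst_is_huge are invented
stand-ins for pmd_none(*new_pmd) and pmd_trans_huge(*new_pmd)); returning
false sends the caller down the ordinary move_ptes() path:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool dst_empty = false;	/* a racing collapse installed a page table */
static bool dst_is_huge = false;

static bool sketch_move_huge_pmd(void)
{
	if (!dst_empty) {
		/* An empty page table is tolerated; a huge entry never is. */
		assert(!dst_is_huge);
		return false;	/* fall back to moving individual ptes */
	}
	/* ... move the huge pmd entry itself ... */
	return true;
}

int main(void)
{
	printf("huge move %s\n", sketch_move_huge_pmd() ? "done" : "fell back");
	return 0;
}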
mm/mremap.c
@@ -133,7 +133,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 	return pte;
 }
 
-static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
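Widening the return type from void to int is what lets the mapping
failures surface at all: move_ptes() can now hand -EAGAIN back to
move_page_tables(), whose call site is updated in the final hunk below.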
@@ -143,6 +143,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spinlock_t *old_ptl, *new_ptl;
 	bool force_flush = false;
 	unsigned long len = old_end - old_addr;
+	int err = 0;
 
 	/*
 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -170,8 +171,16 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	 * pte locks because exclusive mmap_lock prevents deadlock.
 	 */
 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-	new_pte = pte_offset_map(new_pmd, new_addr);
-	new_ptl = pte_lockptr(mm, new_pmd);
+	if (!old_pte) {
+		err = -EAGAIN;
+		goto out;
+	}
+	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
+	if (!new_pte) {
+		pte_unmap_unlock(old_pte, old_ptl);
+		err = -EAGAIN;
+		goto out;
+	}
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
 	flush_tlb_batched_pending(vma->vm_mm);
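The ordering in the failure paths above matters: the old-side lock is
taken first, so when mapping the new side fails it must be dropped before
bailing out. A rough userspace analogue (pthread mutexes and
map_new_pte() are stand-ins for the page-table locks and
pte_offset_map_nolock(); not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t old_ptl = PTHREAD_MUTEX_INITIALIZER;
static int new_table;
static int new_table_gone = 1;	/* simulate the table vanishing under us */

/* Stand-in for pte_offset_map_nolock(): NULL when the table is gone. */
static int *map_new_pte(void)
{
	return new_table_gone ? NULL : &new_table;
}

static int sketch(void)
{
	int *new_pte;

	pthread_mutex_lock(&old_ptl);		/* like pte_offset_map_lock() */
	new_pte = map_new_pte();
	if (!new_pte) {
		pthread_mutex_unlock(&old_ptl);	/* unwind before reporting */
		return -EAGAIN;			/* caller retries the extent */
	}
	/* ... move entries while both sides are held ... */
	pthread_mutex_unlock(&old_ptl);
	return 0;
}

int main(void)
{
	printf("sketch() = %d (expect %d)\n", sketch(), -EAGAIN);
	return 0;
}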
@@ -208,8 +217,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
+out:
 	if (need_rmap_locks)
 		drop_rmap_locks(vma);
+	return err;
 }
 
 #ifndef arch_supports_page_table_move
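Both new failure paths branch to the out: label rather than returning
directly, so drop_rmap_locks() still pairs with the take_rmap_locks()
done on entry whenever need_rmap_locks is set, and the function keeps a
single exit that returns either 0 or -EAGAIN.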
@@ -537,6 +548,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
 		if (!new_pmd)
 			break;
+again:
 		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
 		    pmd_devmap(*old_pmd)) {
 			if (extent == HPAGE_PMD_SIZE &&
@@ -544,8 +556,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				old_pmd, new_pmd, need_rmap_locks))
 				continue;
 			split_huge_pmd(vma, old_pmd, old_addr);
-			if (pmd_trans_unstable(old_pmd))
-				continue;
 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
 			   extent == PMD_SIZE) {
 			/*
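With the retry in place, the pmd_trans_unstable() guard after
split_huge_pmd() becomes redundant: an unstable pmd now shows up as a
failed pte_offset_map_lock() inside move_ptes(), which returns -EAGAIN
and takes the retry path instead.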
@@ -556,11 +566,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				old_pmd, new_pmd, true))
 				continue;
 		}
-
+		if (pmd_none(*old_pmd))
+			continue;
 		if (pte_alloc(new_vma->vm_mm, new_pmd))
 			break;
-		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-			  new_pmd, new_addr, need_rmap_locks);
+		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
+			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
+			goto again;
 	}
 
 	mmu_notifier_invalidate_range_end(&range);
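Note the ordering in this last hunk: pmd_none(*old_pmd) is checked before
anything else, so an extent whose page table was freed by huge shmem
truncation is simply skipped; only then is the destination table
allocated with pte_alloc() and move_ptes() attempted, with a failed
attempt looping back to the again: label. Without the pmd_none() check
first, a truncated source table would make move_ptes() fail forever and
the goto would spin.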