mm: conditionally write-lock VMA in free_pgtables
Normally free_pgtables needs to lock affected VMAs except for the case when VMAs were isolated under VMA write-lock. munmap() does just that, isolating while holding appropriate locks and then downgrading mmap_lock and dropping per-VMA locks before freeing page tables. Add a parameter to free_pgtables for such a scenario. Link: https://lkml.kernel.org/r/20230227173632.3292573-20-surenb@google.com Signed-off-by: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
73046fd00b
commit
98e51a2239
|
@ -105,7 +105,7 @@ void folio_activate(struct folio *folio);
|
|||
|
||||
void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
|
||||
struct vm_area_struct *start_vma, unsigned long floor,
|
||||
unsigned long ceiling);
|
||||
unsigned long ceiling, bool mm_wr_locked);
|
||||
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
|
||||
|
||||
struct zap_details;
|
||||
|
|
|
@ -362,7 +362,7 @@ void free_pgd_range(struct mmu_gather *tlb,
|
|||
|
||||
void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
|
||||
struct vm_area_struct *vma, unsigned long floor,
|
||||
unsigned long ceiling)
|
||||
unsigned long ceiling, bool mm_wr_locked)
|
||||
{
|
||||
MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
|
||||
|
||||
|
@ -380,6 +380,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
|
|||
* Hide vma from rmap and truncate_pagecache before freeing
|
||||
* pgtables
|
||||
*/
|
||||
if (mm_wr_locked)
|
||||
vma_start_write(vma);
|
||||
unlink_anon_vmas(vma);
|
||||
unlink_file_vma(vma);
|
||||
|
||||
|
@ -394,6 +396,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
|
|||
&& !is_vm_hugetlb_page(next)) {
|
||||
vma = next;
|
||||
next = mas_find(&mas, ceiling - 1);
|
||||
if (mm_wr_locked)
|
||||
vma_start_write(vma);
|
||||
unlink_anon_vmas(vma);
|
||||
unlink_file_vma(vma);
|
||||
}
|
||||
|
|
|
@ -2167,7 +2167,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
|
|||
update_hiwater_rss(mm);
|
||||
unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
|
||||
free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING);
|
||||
next ? next->vm_start : USER_PGTABLES_CEILING,
|
||||
mm_wr_locked);
|
||||
tlb_finish_mmu(&tlb);
|
||||
}
|
||||
|
||||
|
@ -3064,7 +3065,7 @@ void exit_mmap(struct mm_struct *mm)
|
|||
set_bit(MMF_OOM_SKIP, &mm->flags);
|
||||
mmap_write_lock(mm);
|
||||
free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
|
||||
USER_PGTABLES_CEILING);
|
||||
USER_PGTABLES_CEILING, true);
|
||||
tlb_finish_mmu(&tlb);
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue