mm: convert page_mkclean_one() to use page_vma_mapped_walk()
For consistency, it is worth converting all page_check_address() callers to page_vma_mapped_walk(), so we can drop the former.

PMD handling here is future-proofing: we don't have users yet. ext4 with huge pages will be the first.

Link: http://lkml.kernel.org/r/20170129173858.45174-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
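As a reading aid, here is a minimal sketch of the page_vma_mapped_walk() pattern that this patch adopts. It is not code from this commit; it only restates the API that earlier patches in this series add to include/linux/rmap.h:

/* Sketch only: the canonical page_vma_mapped_walk() loop. */
struct page_vma_mapped_walk pvmw = {
	.page = page,		/* the page whose mappings we visit */
	.vma = vma,		/* the VMA to search for mappings */
	.address = address,	/* expected address of the page in the VMA */
	.flags = PVMW_SYNC,	/* synchronous walk; mirrors page_check_address()'s sync=1 */
};

while (page_vma_mapped_walk(&pvmw)) {
	/* One mapping per iteration; the matching page table lock is held. */
	if (pvmw.pte) {
		/* pvmw.pte points to a PTE mapping the page at pvmw.address */
	} else {
		/* pvmw.pmd points to a PMD mapping the page (pmd-mapped THP) */
	}
	/* To stop early: page_vma_mapped_walk_done(&pvmw); break; */
}

The walker takes and drops the relevant page table lock internally, which is why the converted function below no longer needs its own mm/pte/ptl locals or the goto out path.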
parent a8fa41ad2f
commit f27176cfc3

mm/rmap.c | 50
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1017,34 +1017,56 @@ int page_referenced(struct page *page,
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
-	int ret = 0;
+	struct page_vma_mapped_walk pvmw = {
+		.page = page,
+		.vma = vma,
+		.address = address,
+		.flags = PVMW_SYNC,
+	};
 	int *cleaned = arg;
 
-	pte = page_check_address(page, mm, address, &ptl, 1);
-	if (!pte)
-		goto out;
-
-	if (pte_dirty(*pte) || pte_write(*pte)) {
-		pte_t entry;
+	while (page_vma_mapped_walk(&pvmw)) {
+		int ret = 0;
+		address = pvmw.address;
+		if (pvmw.pte) {
+			pte_t entry;
+			pte_t *pte = pvmw.pte;
+
+			if (!pte_dirty(*pte) && !pte_write(*pte))
+				continue;
 
-		flush_cache_page(vma, address, pte_pfn(*pte));
-		entry = ptep_clear_flush(vma, address, pte);
-		entry = pte_wrprotect(entry);
-		entry = pte_mkclean(entry);
-		set_pte_at(mm, address, pte, entry);
-		ret = 1;
-	}
+			flush_cache_page(vma, address, pte_pfn(*pte));
+			entry = ptep_clear_flush(vma, address, pte);
+			entry = pte_wrprotect(entry);
+			entry = pte_mkclean(entry);
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			ret = 1;
+		} else {
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+			pmd_t *pmd = pvmw.pmd;
+			pmd_t entry;
+
+			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+				continue;
+
+			flush_cache_page(vma, address, page_to_pfn(page));
+			entry = pmdp_huge_clear_flush(vma, address, pmd);
+			entry = pmd_wrprotect(entry);
+			entry = pmd_mkclean(entry);
+			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			ret = 1;
+#else
+			/* unexpected pmd-mapped page? */
+			WARN_ON_ONCE(1);
+#endif
+		}
 
-	pte_unmap_unlock(pte, ptl);
-
-	if (ret) {
-		mmu_notifier_invalidate_page(mm, address);
-		(*cleaned)++;
+		if (ret) {
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			(*cleaned)++;
+		}
 	}
 
-out:
+
 	return SWAP_AGAIN;
 }
 
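For context on how the converted callback is driven, here is a sketch of the caller from the same era of mm/rmap.c; it is not part of this diff and is unchanged by this patch. rmap_walk() invokes page_mkclean_one() once per VMA that maps the page, per-call results accumulate into 'cleaned' through rwc.arg, and returning SWAP_AGAIN tells the walk to keep going.

/* Caller side (sketch of the surrounding mm/rmap.c, unchanged by this patch). */
int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,		/* handed to page_mkclean_one() as 'arg' */
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,	/* skip VMAs without VM_SHARED */
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}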