fs/proc/task_mmu: properly detect PM_MMAP_EXCLUSIVE per page of PMD-mapped THPs
[ Upstream commit 2c1f057e5be63e890f2dd89e4c25ab5eef084a91 ]

We added PM_MMAP_EXCLUSIVE in 2015 via commit 77bb499bb6 ("pagemap: add mmap-exclusive bit for marking pages mapped only here"), when THPs could not be partially mapped and page_mapcount() returned something that was true for all pages of the THP.

In 2016, we added support for partially mapping THPs via commit 53f9263bab ("mm: rework mapcount accounting to enable 4k mapping of THPs"), but missed making the PM_MMAP_EXCLUSIVE detection work per page as well.

Checking page_mapcount() on the head page does not tell the whole story: we should check each individual page. In a future without per-page mapcounts this will look different, but we'll change it to be consistent with PTE-mapped THPs once we deal with that.

Link: https://lkml.kernel.org/r/20240607122357.115423-4-david@redhat.com
Fixes: 53f9263bab ("mm: rework mapcount accounting to enable 4k mapping of THPs")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lance Yang <ioworker0@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
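The user-visible effect is in /proc/<pid>/pagemap. Below is a minimal userspace sketch, not part of this patch: the PM_PRESENT and PM_MMAP_EXCLUSIVE bit positions follow Documentation/admin-guide/mm/pagemap.rst, and whether the buffer actually ends up THP-backed depends on system configuration. It maps one PMD-sized region, faults it in, and dumps the exclusivity bit per 4 KiB page; with this fix applied, each entry of a PMD-mapped THP reflects that subpage's mapcount rather than the head page's.

/*
 * Minimal sketch: map one PMD-sized (2 MiB) region, fault it in, and
 * dump the per-page "exclusively mapped" bit from /proc/self/pagemap.
 * Bit positions per Documentation/admin-guide/mm/pagemap.rst.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

#define PM_PRESENT		(1ULL << 63)
#define PM_MMAP_EXCLUSIVE	(1ULL << 56)

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	size_t len = 2UL * 1024 * 1024;		/* one PMD-sized region */
	char *buf = aligned_alloc(len, len);	/* alignment makes it THP-eligible */
	int fd;

	if (!buf)
		return 1;
	for (size_t i = 0; i < len; i += psize)	/* fault the range in */
		buf[i] = 1;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;

	for (size_t i = 0; i < len; i += psize) {
		uint64_t ent;
		/* one 8-byte pagemap entry per virtual page */
		off_t off = ((uintptr_t)(buf + i) / psize) * sizeof(ent);

		if (pread(fd, &ent, sizeof(ent), off) != (ssize_t)sizeof(ent))
			break;
		printf("page %4zu: present=%d exclusive=%d\n", i / psize,
		       !!(ent & PM_PRESENT), !!(ent & PM_MMAP_EXCLUSIVE));
	}
	close(fd);
	return 0;
}

One scenario where the fix is visible: after fork(), a child unmapping part of the region can leave some subpages of the parent's still-PMD-mapped THP with mapcount 1 and others with mapcount 2; only a fixed kernel distinguishes them in the output above.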
parent cdeba6d1cf
commit 8acbcc5067
fs/proc/task_mmu.c:

@@ -1470,6 +1470,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ptl = pmd_trans_huge_lock(pmdp, vma);
 	if (ptl) {
+		unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
 		struct page *page = NULL;
@@ -1486,8 +1487,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		if (pmd_uffd_wp(pmd))
 			flags |= PM_UFFD_WP;
 		if (pm->show_pfn)
-			frame = pmd_pfn(pmd) +
-				((addr & ~PMD_MASK) >> PAGE_SHIFT);
+			frame = pmd_pfn(pmd) + idx;
 	}
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	else if (is_swap_pmd(pmd)) {
@@ -1496,11 +1496,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 
 		if (pm->show_pfn) {
 			if (is_pfn_swap_entry(entry))
-				offset = swp_offset_pfn(entry);
+				offset = swp_offset_pfn(entry) + idx;
 			else
-				offset = swp_offset(entry);
-			offset = offset +
-				((addr & ~PMD_MASK) >> PAGE_SHIFT);
+				offset = swp_offset(entry) + idx;
 			frame = swp_type(entry) |
 				(offset << MAX_SWAPFILES_SHIFT);
 		}
@@ -1516,12 +1514,16 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 
 		if (page && !PageAnon(page))
 			flags |= PM_FILE;
-		if (page && (flags & PM_PRESENT) && page_mapcount(page) == 1)
-			flags |= PM_MMAP_EXCLUSIVE;
 
-		for (; addr != end; addr += PAGE_SIZE) {
-			pagemap_entry_t pme = make_pme(frame, flags);
+		for (; addr != end; addr += PAGE_SIZE, idx++) {
+			unsigned long cur_flags = flags;
+			pagemap_entry_t pme;
 
+			if (page && (flags & PM_PRESENT) &&
+			    page_mapcount(page + idx) == 1)
+				cur_flags |= PM_MMAP_EXCLUSIVE;
+
+			pme = make_pme(frame, cur_flags);
 			err = add_to_pagemap(&pme, pm);
 			if (err)
 				break;
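For a PMD-mapped THP, idx is simply the subpage index of addr within the PMD-sized region, so page + idx names the exact subpage a given pagemap entry describes. A quick worked example, assuming 4 KiB pages (PAGE_SHIFT = 12) and 2 MiB PMDs:

	addr             = 0x7f0000035000
	addr & ~PMD_MASK = 0x35000
	idx              = 0x35000 >> 12 = 53

so for this entry the loop tests page_mapcount(page + 53) rather than page_mapcount(page), and a subpage mapped only here gets PM_MMAP_EXCLUSIVE even when other subpages of the same THP are shared.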