mm/hugetlb: fix uffd-wp handling for migration entries in hugetlb_change_protection()
We have to update the uffd-wp SWP PTE bit independent of the type of
migration entry. Currently, if we're unlucky and we want to install/clear
the uffd-wp bit just while we're migrating a read-only mapped hugetlb
page, we would fail to set/clear the uffd-wp bit.
Further, if we're processing a readable-exclusive migration entry and
want to neither set nor clear the uffd-wp bit, we could currently end up
losing the uffd-wp bit. Note that the same would hold for writable
migration entries, however, having a writable migration entry with the
uffd-wp bit set would already mean that something went wrong.
Note that the change from !is_readable_migration_entry ->
is_writable_migration_entry is harmless and actually cleaner, as raised by
Miaohe Lin and discussed in [1].
[1] https://lkml.kernel.org/r/90dd6a93-4500-e0de-2bf0-bf522c311b0c@huawei.com
Link: https://lkml.kernel.org/r/20221222205511.675832-3-david@redhat.com
Fixes: 60dfaad65a ("mm/hugetlb: allow uffd wr-protect none ptes")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 0e678153f5 — commit 44f86392bd

Changed file: mm/hugetlb.c (17 lines changed)
@ -6662,10 +6662,9 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
|
|||
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
|
||||
swp_entry_t entry = pte_to_swp_entry(pte);
|
||||
struct page *page = pfn_swap_entry_to_page(entry);
|
||||
pte_t newpte = pte;
|
||||
|
||||
if (!is_readable_migration_entry(entry)) {
|
||||
pte_t newpte;
|
||||
|
||||
if (is_writable_migration_entry(entry)) {
|
||||
if (PageAnon(page))
|
||||
entry = make_readable_exclusive_migration_entry(
|
||||
swp_offset(entry));
|
||||
|
@ -6673,13 +6672,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
|
|||
entry = make_readable_migration_entry(
|
||||
swp_offset(entry));
|
||||
newpte = swp_entry_to_pte(entry);
|
||||
if (uffd_wp)
|
||||
newpte = pte_swp_mkuffd_wp(newpte);
|
||||
else if (uffd_wp_resolve)
|
||||
newpte = pte_swp_clear_uffd_wp(newpte);
|
||||
set_huge_pte_at(mm, address, ptep, newpte);
|
||||
pages++;
|
||||
}
|
||||
|
||||
if (uffd_wp)
|
||||
newpte = pte_swp_mkuffd_wp(newpte);
|
||||
else if (uffd_wp_resolve)
|
||||
newpte = pte_swp_clear_uffd_wp(newpte);
|
||||
if (!pte_same(pte, newpte))
|
||||
set_huge_pte_at(mm, address, ptep, newpte);
|
||||
} else if (unlikely(is_pte_marker(pte))) {
|
||||
/* No other markers apply for now. */
|
||||
WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
|
||||
|
|
Loading…
Reference in New Issue