mm/swapfile: fix wrong swap entry type for hwpoisoned swapcache page
Patch series "A few fixup patches for mm", v2.
This series contains a few fixup patches to fix a potential unexpected
return value, fix the wrong swap entry type for a hwpoisoned swapcache
page, and so on. More details can be found in the respective changelogs.
This patch (of 3):
A hwpoisoned dirty swap cache page is kept in the swap cache, and there is
simple interception code in do_swap_page() to catch it. But when trying
to swapoff, unuse_pte() will wrongly install a generic "future accesses
are invalid" swap entry for the hwpoisoned swap cache page, because it is
unaware of this type of page. The user will then receive a SIGBUS signal
without the expected BUS_MCEERR_AR payload. While at it, the typo
'hwposioned' is fixed.
Link: https://lkml.kernel.org/r/20230727115643.639741-1-linmiaohe@huawei.com
Link: https://lkml.kernel.org/r/20230727115643.639741-2-linmiaohe@huawei.com
Fixes: 6b970599e8 ("mm: hwpoison: support recovery from ksm_might_need_to_copy()")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
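
To make the user-visible difference concrete, here is a minimal, hypothetical
user-space sketch (not part of this patch) of how a process can tell the two
SIGBUS flavors apart: a memory-failure SIGBUS carries si_code == BUS_MCEERR_AR,
while the wrongly installed generic swap entry produces a plain SIGBUS without
that payload. It assumes a Linux/glibc environment where BUS_MCEERR_AR is
exposed via <signal.h>; the handler and program names are illustrative only.

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Illustrative only: report whether a SIGBUS carries the memory-failure
	 * payload (BUS_MCEERR_AR) or arrived as a plain SIGBUS.  fprintf() is
	 * not async-signal-safe; it is used here purely for demonstration. */
	static void sigbus_handler(int sig, siginfo_t *info, void *ucontext)
	{
		if (info->si_code == BUS_MCEERR_AR)
			fprintf(stderr, "SIGBUS: hwpoison (BUS_MCEERR_AR) at %p\n",
				info->si_addr);
		else
			fprintf(stderr, "SIGBUS: si_code=%d at %p (no MCE payload)\n",
				info->si_code, info->si_addr);
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = sigbus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);

		/* ... touch a mapping whose backing page may have been hwpoisoned
		 * and then swapped off; a fault there raises SIGBUS ... */
		pause();
		return 0;
	}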
commit f985fc3220
parent cac7ea57a0
 mm/ksm.c      | 2 ++
 mm/swapfile.c | 8 ++++----
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2784,6 +2784,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 			anon_vma->root == vma->anon_vma->root) {
 		return page;		/* still no need to copy it */
 	}
+	if (PageHWPoison(page))
+		return ERR_PTR(-EHWPOISON);
 	if (!PageUptodate(page))
 		return page;		/* let do_swap_page report the error */
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1746,7 +1746,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
-	bool hwposioned = false;
+	bool hwpoisoned = PageHWPoison(page);
 	int ret = 1;
 
 	swapcache = page;
@@ -1754,7 +1754,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	if (unlikely(!page))
 		return -ENOMEM;
 	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
-		hwposioned = true;
+		hwpoisoned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
@@ -1765,11 +1765,11 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	old_pte = ptep_get(pte);
 
-	if (unlikely(hwposioned || !PageUptodate(page))) {
+	if (unlikely(hwpoisoned || !PageUptodate(page))) {
 		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		if (hwposioned) {
+		if (hwpoisoned) {
 			swp_entry = make_hwpoison_entry(swapcache);
 			page = swapcache;
 		} else {