HWPOISON, hugetlb: detect hwpoison in hugetlb code
This patch blocks access to a hwpoisoned hugepage and also blocks unmapping of it.

Dependency: "HWPOISON, hugetlb: enable error handling path for hugepage"

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent 93f70f900d
commit fd6a03edd2

 mm/hugetlb.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
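For context (not part of this patch): the helper added in the next hunk builds on the hwpoison swap-entry support that <linux/swapops.h> already provides, which is why the two includes above are needed. Roughly, those helpers look like this in kernels of this era (paraphrased sketch):

/*
 * A hwpoison entry is a pte in swap format whose type field is the
 * special SWP_HWPOISON slot (at or above MAX_SWAPFILES), so it can
 * never collide with a real swap device.
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}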
@@ -2149,6 +2151,19 @@ nomem:
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
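Note on the check order (editorial, not part of the patch): after ruling out none and present ptes, the helper tests non_swap_entry() before is_hwpoison_entry(), because special entries such as migration and hwpoison entries are encoded with a type at or above MAX_SWAPFILES, outside the range of real swap devices. From <linux/swapops.h>, approximately:

static inline int non_swap_entry(swp_entry_t entry)
{
	/* true for special (migration, hwpoison) entries */
	return swp_type(entry) >= MAX_SWAPFILES;
}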
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * HWPoisoned hugepage is already unmapped and dropped reference
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
@@ -2490,6 +2511,18 @@ retry:
 		page_dup_rmap(page);
 	}
 
+	/*
+	 * Since memory error handler replaces pte into hwpoison swap entry
+	 * at the time of error handling, a process which reserved but not have
+	 * the mapping to the error hugepage does not have hwpoison swap entry.
+	 * So we need to block accesses from such a process by checking
+	 * PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
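The "memory error handler replaces pte into hwpoison swap entry" mentioned in the comment above refers to the rmap unmap path: when memory_failure() unmaps a poisoned page, try_to_unmap_one() in mm/rmap.c leaves a hwpoison marker pte behind instead of a clean hole. Approximately (paraphrased sketch of that era's code, not part of this patch):

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		/* marker pte: later faults see a hwpoison swap entry */
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	}

A process that merely holds a reservation never had a mapping, so it never gets such a marker pte; that gap is exactly what the PageHWPoison() check above closes.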
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
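A minimal userspace sketch of the behavior this patch enforces, assuming 2MB hugepages are reserved (e.g. via /proc/sys/vm/nr_hugepages) and error injection is available (CONFIG_MEMORY_FAILURE, CAP_SYS_ADMIN). The page size and fallback constants below are assumptions for illustration, not part of this commit:

/*
 * Poison a mapped hugepage with MADV_HWPOISON, then touch it again.
 * With this patch applied, the second access faults into hugetlb_fault(),
 * hits the hwpoison entry check, and the process gets SIGBUS instead of
 * silently reusing the poisoned hugepage.
 */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000		/* x86 value, for older headers */
#endif
#ifndef MADV_HWPOISON
#define MADV_HWPOISON	100		/* for older headers */
#endif

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumes 2MB hugepages */

int main(void)
{
	char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* instantiate the hugepage */

	if (madvise(p, HPAGE_SIZE, MADV_HWPOISON)) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}
	p[0] = 2;	/* expected: SIGBUS via VM_FAULT_HWPOISON */
	printf("unexpected: access to poisoned hugepage succeeded\n");
	return 0;
}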