x86: mm: add x86_64 support for page table check
Add page table check hooks into routines that modify user page tables. Link: https://lkml.kernel.org/r/20211221154650.1047963-5-pasha.tatashin@soleen.com Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Rientjes <rientjes@google.com> Cc: Frederic Weisbecker <frederic@kernel.org> Cc: Greg Thelen <gthelen@google.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Kees Cook <keescook@chromium.org> Cc: Masahiro Yamada <masahiroy@kernel.org> Cc: Mike Rapoport <rppt@kernel.org> Cc: Muchun Song <songmuchun@bytedance.com> Cc: Paul Turner <pjt@google.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Sami Tolvanen <samitolvanen@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Wei Xu <weixugc@google.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
df4e817b71
commit
d283d422c6
|
@ -104,6 +104,7 @@ config X86
|
|||
select ARCH_SUPPORTS_ACPI
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
|
||||
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
|
||||
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
|
||||
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
|
||||
select ARCH_SUPPORTS_LTO_CLANG
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <asm/pkru.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm-generic/pgtable_uffd.h>
|
||||
#include <linux/page_table_check.h>
|
||||
|
||||
extern pgd_t early_top_pgt[PTRS_PER_PGD];
|
||||
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
|
||||
|
@ -1006,18 +1007,21 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
|
|||
/*
 * Install a PTE in a user page table.
 *
 * The page_table_check hook is called *before* the entry is published
 * so the new mapping is accounted for (catching e.g. invalid sharing of
 * anonymous pages) before any other CPU can observe it.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	page_table_check_pte_set(mm, addr, ptep, pte);
	set_pte(ptep, pte);
}
|
||||
|
||||
/*
 * Install a PMD entry (e.g. a huge-page mapping) in a user page table.
 *
 * As with set_pte_at(), the page_table_check accounting runs before
 * set_pmd() makes the entry visible.
 */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	set_pmd(pmdp, pmd);
}
|
||||
|
||||
/*
 * Install a PUD entry in a user page table, with page_table_check
 * accounting performed before the write becomes visible.
 *
 * NOTE(review): unlike set_pte_at()/set_pmd_at() above, this writes via
 * native_set_pud() rather than a set_pud() wrapper — presumably
 * intentional (no paravirt hook needed at this level); confirm against
 * the surrounding file.
 */
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	native_set_pud(pudp, pud);
}
|
||||
|
||||
|
@ -1048,6 +1052,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
|||
pte_t *ptep)
|
||||
{
|
||||
pte_t pte = native_ptep_get_and_clear(ptep);
|
||||
page_table_check_pte_clear(mm, addr, pte);
|
||||
return pte;
|
||||
}
|
||||
|
||||
|
@ -1063,12 +1068,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
|
|||
* care about updates and native needs no locking
|
||||
*/
|
||||
pte = native_local_ptep_get_and_clear(ptep);
|
||||
page_table_check_pte_clear(mm, addr, pte);
|
||||
} else {
|
||||
pte = ptep_get_and_clear(mm, addr, ptep);
|
||||
}
|
||||
return pte;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_CLEAR
/*
 * Clear a PTE.
 *
 * When CONFIG_PAGE_TABLE_CHECK is enabled we must route through
 * ptep_get_and_clear() so the old entry's value is read back and the
 * clear is accounted by page_table_check; otherwise a plain pte_clear()
 * suffices.  IS_ENABLED() lets the compiler drop the dead branch.
 */
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
		ptep_get_and_clear(mm, addr, ptep);
	else
		pte_clear(mm, addr, ptep);
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
|
||||
static inline void ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
|
@ -1109,14 +1125,22 @@ static inline int pmd_write(pmd_t pmd)
|
|||
/*
 * Atomically read and clear a huge PMD entry.
 *
 * Returns the old entry so callers (e.g. THP splitting/zapping) can
 * inspect what was mapped.  The clear is reported to page_table_check
 * after the hardware entry is gone, using the value actually observed.
 *
 * Fix: the body previously began with
 *	return native_pmdp_get_and_clear(pmdp);
 * which made the page_table_check_pmd_clear() accounting below
 * unreachable dead code (stale pre-patch line).  The early return is
 * removed so the checked path is the one that executes.
 */
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);

	page_table_check_pmd_clear(mm, addr, pmd);

	return pmd;
}
|
||||
|
||||
#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
|
||||
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pud_t *pudp)
|
||||
{
|
||||
return native_pudp_get_and_clear(pudp);
|
||||
pud_t pud = native_pudp_get_and_clear(pudp);
|
||||
|
||||
page_table_check_pud_clear(mm, addr, pud);
|
||||
|
||||
return pud;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
|
||||
|
@ -1137,6 +1161,7 @@ static inline int pud_write(pud_t pud)
|
|||
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmdp, pmd_t pmd)
|
||||
{
|
||||
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
|
||||
if (IS_ENABLED(CONFIG_SMP)) {
|
||||
return xchg(pmdp, pmd);
|
||||
} else {
|
||||
|
|
Loading…
Reference in New Issue