// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
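
/*
 * Sanity checks on user page table entries, driven by per-page counters kept
 * in page_ext: a page must never be mapped as both anonymous and file-backed,
 * and an anonymous page must not gain a writable mapping while it is mapped
 * elsewhere.
 */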
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt
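
/*
 * Per-page counters, stored in the page's page_ext area: how many times the
 * page is currently mapped into user page tables as anonymous memory and how
 * many times as file-backed memory. The two must never be non-zero at the
 * same time, and neither may go negative.
 */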
struct page_table_check {
        atomic_t anon_map_count;
        atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
                                IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);
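
/*
 * The checks default to off (the "disabled" key is true).
 * init_page_table_check() clears the key when the feature is enabled on the
 * kernel command line or by CONFIG_PAGE_TABLE_CHECK_ENFORCED.
 */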
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
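
/*
 * Enable or disable at boot, e.g. "page_table_check=on"; the value is parsed
 * with kstrtobool().
 */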
static int __init early_page_table_check_param(char *buf)
{
        return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
        return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
        if (!__page_table_check_enabled)
                return;
        static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
        .size = sizeof(struct page_table_check),
        .need = need_page_table_check,
        .init = init_page_table_check,
        .need_shared_flags = false,
};
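
/*
 * Return this page's page_table_check counters, stored at
 * page_table_check_ops.offset within its page_ext area (the offset is
 * assigned by the page_ext core when the ops are registered).
 */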
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
        BUG_ON(!page_ext);
        return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and make sure the counters do
 * not become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
        struct page_ext *page_ext;
        struct page *page;
        unsigned long i;
        bool anon;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);

        BUG_ON(PageSlab(page));
        anon = PageAnon(page);

        for (i = 0; i < pgcnt; i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                if (anon) {
                        BUG_ON(atomic_read(&ptc->file_map_count));
                        BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
                } else {
                        BUG_ON(atomic_read(&ptc->anon_map_count));
                        BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
                }
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that it is not being
 * mapped with a different type into a different process. In particular, a
 * page must never be mapped as both anonymous and file-backed, and an
 * anonymous page must never be writably mapped more than once.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
                                 unsigned long pfn, unsigned long pgcnt,
                                 bool rw)
{
        struct page_ext *page_ext;
        struct page *page;
        unsigned long i;
        bool anon;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);

        BUG_ON(PageSlab(page));
        anon = PageAnon(page);

        for (i = 0; i < pgcnt; i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                if (anon) {
                        BUG_ON(atomic_read(&ptc->file_map_count));
                        BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
                } else {
                        BUG_ON(atomic_read(&ptc->anon_map_count));
                        BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
                }
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
        struct page_ext *page_ext;
        unsigned long i;

        BUG_ON(PageSlab(page));

        page_ext = page_ext_get(page);
        BUG_ON(!page_ext);
        for (i = 0; i < (1ul << order); i++) {
                struct page_table_check *ptc = get_page_table_check(page_ext);

                BUG_ON(atomic_read(&ptc->anon_map_count));
                BUG_ON(atomic_read(&ptc->file_map_count));
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}
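
/*
 * A PTE that may have mapped a user-accessible page is being cleared: account
 * the removal of the old mapping. Kernel (init_mm) page tables are not
 * tracked.
 */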
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
                                  pte_t pte)
{
        if (&init_mm == mm)
                return;

        if (pte_user_accessible_page(pte)) {
                page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pte_clear);
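
/*
 * Same as __page_table_check_pte_clear(), but for a PMD entry covering
 * PMD_SIZE >> PAGE_SHIFT base pages.
 */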
void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
                                  pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        if (pmd_user_accessible_page(pmd)) {
                page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);
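
/*
 * Same as __page_table_check_pte_clear(), but for a PUD entry covering
 * PUD_SIZE >> PAGE_SHIFT base pages.
 */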
void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
                                  pud_t pud)
{
        if (&init_mm == mm)
                return;

        if (pud_user_accessible_page(pud)) {
                page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
        }
}
EXPORT_SYMBOL(__page_table_check_pud_clear);
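
/*
 * A PTE is being installed: account the removal of whatever the slot
 * previously mapped, then, if the new entry maps a user-accessible page,
 * account the new mapping and check that a writable anonymous page is not
 * being shared.
 */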
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pte_clear(mm, addr, ptep_get(ptep));
        if (pte_user_accessible_page(pte)) {
                page_table_check_set(mm, addr, pte_pfn(pte),
                                     PAGE_SIZE >> PAGE_SHIFT,
                                     pte_write(pte));
        }
}
EXPORT_SYMBOL(__page_table_check_pte_set);
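
/*
 * Same as __page_table_check_pte_set(), but for a PMD entry covering
 * PMD_SIZE >> PAGE_SHIFT base pages.
 */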
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
                                pmd_t *pmdp, pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pmd_clear(mm, addr, *pmdp);
        if (pmd_user_accessible_page(pmd)) {
                page_table_check_set(mm, addr, pmd_pfn(pmd),
                                     PMD_SIZE >> PAGE_SHIFT,
                                     pmd_write(pmd));
        }
}
EXPORT_SYMBOL(__page_table_check_pmd_set);
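
/*
 * Same as __page_table_check_pte_set(), but for a PUD entry covering
 * PUD_SIZE >> PAGE_SHIFT base pages.
 */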
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
                                pud_t *pudp, pud_t pud)
{
        if (&init_mm == mm)
                return;

        __page_table_check_pud_clear(mm, addr, *pudp);
        if (pud_user_accessible_page(pud)) {
                page_table_check_set(mm, addr, pud_pfn(pud),
                                     PUD_SIZE >> PAGE_SHIFT,
                                     pud_write(pud));
        }
}
EXPORT_SYMBOL(__page_table_check_pud_set);
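
/*
 * A non-leaf PMD is being cleared: walk the PTE page it points to and account
 * the removal of every entry in it, as if each PTE had been cleared
 * individually.
 */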
void __page_table_check_pte_clear_range(struct mm_struct *mm,
                                        unsigned long addr,
                                        pmd_t pmd)
{
        if (&init_mm == mm)
                return;

        if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
                pte_t *ptep = pte_offset_map(&pmd, addr);
                unsigned long i;

                if (WARN_ON(!ptep))
                        return;
                for (i = 0; i < PTRS_PER_PTE; i++) {
                        __page_table_check_pte_clear(mm, addr, ptep_get(ptep));
                        addr += PAGE_SIZE;
                        ptep++;
                }
                pte_unmap(ptep - PTRS_PER_PTE);
        }
}