// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#include <linux/cacheflush.h>
|
|
|
|
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
|
|
|
|
#include <linux/mm_types.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
|
|
|
|
#include "../internal.h"
|
|
|
|
#include "../slab.h"
|
|
|
|
#include "kmsan.h"
|
|
|
|
|
|
|
|
/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */

/*
 * Initialize the KMSAN context of a newly created task.
 *
 * The whole body runs inside the KMSAN runtime so that instrumented
 * helpers reached from kmsan_internal_task_create() do not recurse
 * back into KMSAN (see the comment above about runtime nesting).
 */
void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}
|
|
|
|
|
|
|
|
void kmsan_task_exit(struct task_struct *task)
|
|
|
|
{
|
|
|
|
struct kmsan_ctx *ctx = &task->kmsan_ctx;
|
|
|
|
|
|
|
|
if (!kmsan_enabled || kmsan_in_runtime())
|
|
|
|
return;
|
|
|
|
|
|
|
|
ctx->allow_reporting = false;
|
|
|
|
}
|
|
|
|
|
2022-09-15 23:03:49 +08:00
|
|
|
/*
 * Set up KMSAN metadata for a freshly allocated slab object.
 *
 * Zeroed allocations (__GFP_ZERO) are marked initialized; all others are
 * poisoned so that reads before initialization can be detected.
 */
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	unsigned int size;

	if (unlikely(object == NULL) || !kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	size = s->object_size;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}
|
|
|
|
|
|
|
|
void kmsan_slab_free(struct kmem_cache *s, void *object)
|
|
|
|
{
|
|
|
|
if (!kmsan_enabled || kmsan_in_runtime())
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* RCU slabs could be legally used after free within the RCU period */
|
|
|
|
if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* If there's a constructor, freed memory must remain in the same state
|
|
|
|
* until the next allocation. We cannot save its state to detect
|
|
|
|
* use-after-free bugs, instead we just keep it unpoisoned.
|
|
|
|
*/
|
|
|
|
if (s->ctor)
|
|
|
|
return;
|
|
|
|
kmsan_enter_runtime();
|
|
|
|
kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
|
|
|
|
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
|
|
|
|
kmsan_leave_runtime();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set up KMSAN metadata for a page-backed (kmalloc_large-style)
 * allocation of @size bytes at @ptr: zeroed allocations become
 * initialized, the rest are poisoned.
 */
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	void *addr = (void *)ptr;

	if (unlikely(addr == NULL) || !kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(addr, size, /*checked*/ true);
	else
		kmsan_internal_poison_memory(addr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}
|
|
|
|
|
|
|
|
void kmsan_kfree_large(const void *ptr)
|
|
|
|
{
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
if (!kmsan_enabled || kmsan_in_runtime())
|
|
|
|
return;
|
|
|
|
kmsan_enter_runtime();
|
|
|
|
page = virt_to_head_page((void *)ptr);
|
|
|
|
KMSAN_WARN_ON(ptr != page_address(page));
|
|
|
|
kmsan_internal_poison_memory((void *)ptr,
|
|
|
|
PAGE_SIZE << compound_order(page),
|
|
|
|
GFP_KERNEL,
|
|
|
|
KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
|
|
|
|
kmsan_leave_runtime();
|
|
|
|
}
|
|
|
|
|
2022-09-15 23:03:48 +08:00
|
|
|
static unsigned long vmalloc_shadow(unsigned long addr)
|
|
|
|
{
|
|
|
|
return (unsigned long)kmsan_get_metadata((void *)addr,
|
|
|
|
KMSAN_META_SHADOW);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned long vmalloc_origin(unsigned long addr)
|
|
|
|
{
|
|
|
|
return (unsigned long)kmsan_get_metadata((void *)addr,
|
|
|
|
KMSAN_META_ORIGIN);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Unmap the shadow and origin ranges that mirror a vunmapped
 * [start, end) range, then flush the cache over both metadata ranges.
 */
void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long shadow_start = vmalloc_shadow(start);
	unsigned long shadow_end = vmalloc_shadow(end);
	unsigned long origin_start = vmalloc_origin(start);
	unsigned long origin_end = vmalloc_origin(end);

	__vunmap_range_noflush(shadow_start, shadow_end);
	__vunmap_range_noflush(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function creates new shadow/origin pages for the physical pages mapped
|
|
|
|
* into the virtual memory. If those physical pages already had shadow/origin,
|
|
|
|
* those are ignored.
|
|
|
|
*/
|
|
|
|
void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
|
|
|
|
phys_addr_t phys_addr, pgprot_t prot,
|
|
|
|
unsigned int page_shift)
|
|
|
|
{
|
|
|
|
gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
|
|
|
|
struct page *shadow, *origin;
|
|
|
|
unsigned long off = 0;
|
|
|
|
int nr;
|
|
|
|
|
|
|
|
if (!kmsan_enabled || kmsan_in_runtime())
|
|
|
|
return;
|
|
|
|
|
|
|
|
nr = (end - start) / PAGE_SIZE;
|
|
|
|
kmsan_enter_runtime();
|
|
|
|
for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
|
|
|
|
shadow = alloc_pages(gfp_mask, 1);
|
|
|
|
origin = alloc_pages(gfp_mask, 1);
|
|
|
|
__vmap_pages_range_noflush(
|
|
|
|
vmalloc_shadow(start + off),
|
|
|
|
vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
|
|
|
|
PAGE_SHIFT);
|
|
|
|
__vmap_pages_range_noflush(
|
|
|
|
vmalloc_origin(start + off),
|
|
|
|
vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
|
|
|
|
PAGE_SHIFT);
|
|
|
|
}
|
|
|
|
flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
|
|
|
|
flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
|
|
|
|
kmsan_leave_runtime();
|
|
|
|
}
|
|
|
|
|
|
|
|
void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
|
|
|
|
{
|
|
|
|
unsigned long v_shadow, v_origin;
|
|
|
|
struct page *shadow, *origin;
|
|
|
|
int nr;
|
|
|
|
|
|
|
|
if (!kmsan_enabled || kmsan_in_runtime())
|
|
|
|
return;
|
|
|
|
|
|
|
|
nr = (end - start) / PAGE_SIZE;
|
|
|
|
kmsan_enter_runtime();
|
|
|
|
v_shadow = (unsigned long)vmalloc_shadow(start);
|
|
|
|
v_origin = (unsigned long)vmalloc_origin(start);
|
|
|
|
for (int i = 0; i < nr;
|
|
|
|
i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
|
|
|
|
shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
|
|
|
|
origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
|
|
|
|
__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
|
|
|
|
__vunmap_range_noflush(v_origin, vmalloc_origin(end));
|
|
|
|
if (shadow)
|
|
|
|
__free_pages(shadow, 1);
|
|
|
|
if (origin)
|
|
|
|
__free_pages(origin, 1);
|
|
|
|
}
|
|
|
|
flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
|
|
|
|
flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
|
|
|
|
kmsan_leave_runtime();
|
|
|
|
}
|
|
|
|
|
/* Functions from kmsan-checks.h follow. */
/* Mark @size bytes at @address as uninitialized. */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (kmsan_enabled && !kmsan_in_runtime()) {
		kmsan_enter_runtime();
		/*
		 * Callers may poison/unpoison arbitrary memory, hence
		 * KMSAN_POISON_NOCHECK.
		 */
		kmsan_internal_poison_memory((void *)address, size, flags,
					     KMSAN_POISON_NOCHECK);
		kmsan_leave_runtime();
	}
}
EXPORT_SYMBOL(kmsan_poison_memory);
|
|
|
|
|
|
|
|
/* Mark @size bytes at @address as initialized. */
void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long uaccess_state;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/*
	 * NOTE(review): the user-access state is saved/restored around the
	 * runtime section, presumably because this hook can be reached with
	 * user access enabled — confirm against the callers.
	 */
	uaccess_state = user_access_save();
	kmsan_enter_runtime();
	/*
	 * Callers may poison/unpoison arbitrary memory, hence
	 * KMSAN_POISON_NOCHECK.
	 */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
	user_access_restore(uaccess_state);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
|
|
|
|
|
|
|
|
/* Check @size bytes at @addr and report uninitialized values, if any. */
void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;

	/* Not a user-copy check, so no user address is supplied. */
	kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
				    REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
|