Merge branch 'for-next/late-arrivals' into for-next/core
Late patches for 5.10: MTE selftests, minor KCSAN preparation and removal of some unused prototypes. (Amit Daniel Kachhap and others) * for-next/late-arrivals: arm64: random: Remove no longer needed prototypes arm64: initialize per-cpu offsets earlier kselftest/arm64: Check mte tagged user address in kernel kselftest/arm64: Verify KSM page merge for MTE pages kselftest/arm64: Verify all different mmap MTE options kselftest/arm64: Check forked child mte memory accessibility kselftest/arm64: Verify mte tag inclusion via prctl kselftest/arm64: Add utilities and a test to validate mte memory
This commit is contained in:
commit
a82e4ef041
|
@ -79,10 +79,5 @@ arch_get_random_seed_long_early(unsigned long *v)
|
|||
}
|
||||
#define arch_get_random_seed_long_early arch_get_random_seed_long_early
|
||||
|
||||
#else
|
||||
|
||||
static inline bool __arm64_rndr(unsigned long *v) { return false; }
|
||||
static inline bool __init __early_cpu_has_rndr(void) { return false; }
|
||||
|
||||
#endif /* CONFIG_ARCH_RANDOM */
|
||||
#endif /* _ASM_ARCHRANDOM_H */
|
||||
|
|
|
@ -68,4 +68,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
|
|||
void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
|
||||
struct cpuinfo_arm64 *boot);
|
||||
|
||||
void init_this_cpu_offset(void);
|
||||
|
||||
#endif /* __ASM_CPU_H */
|
||||
|
|
|
@ -448,6 +448,8 @@ SYM_FUNC_START_LOCAL(__primary_switched)
|
|||
bl __pi_memset
|
||||
dsb ishst // Make zero page visible to PTW
|
||||
|
||||
bl init_this_cpu_offset
|
||||
|
||||
#ifdef CONFIG_KASAN
|
||||
bl kasan_early_init
|
||||
#endif
|
||||
|
@ -754,6 +756,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
|
|||
ptrauth_keys_init_cpu x2, x3, x4, x5
|
||||
#endif
|
||||
|
||||
bl init_this_cpu_offset
|
||||
b secondary_start_kernel
|
||||
SYM_FUNC_END(__secondary_switched)
|
||||
|
||||
|
|
|
@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
|
|||
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
|
||||
set_cpu_logical_map(0, mpidr);
|
||||
|
||||
/*
|
||||
* clear __my_cpu_offset on boot CPU to avoid hang caused by
|
||||
* using percpu variable early, for example, lockdep will
|
||||
* access percpu variable inside lock_release
|
||||
*/
|
||||
set_my_cpu_offset(0);
|
||||
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
|
||||
(unsigned long)mpidr, read_cpuid_id());
|
||||
}
|
||||
|
@ -282,6 +276,12 @@ u64 cpu_logical_map(int cpu)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(cpu_logical_map);
|
||||
|
||||
void noinstr init_this_cpu_offset(void)
|
||||
{
|
||||
unsigned int cpu = task_cpu(current);
|
||||
set_my_cpu_offset(per_cpu_offset(cpu));
|
||||
}
|
||||
|
||||
void __init __no_sanitize_address setup_arch(char **cmdline_p)
|
||||
{
|
||||
init_mm.start_code = (unsigned long) _text;
|
||||
|
|
|
@ -192,10 +192,7 @@ asmlinkage notrace void secondary_start_kernel(void)
|
|||
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
|
||||
struct mm_struct *mm = &init_mm;
|
||||
const struct cpu_operations *ops;
|
||||
unsigned int cpu;
|
||||
|
||||
cpu = task_cpu(current);
|
||||
set_my_cpu_offset(per_cpu_offset(cpu));
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* All kernel threads share the same mm context; grab a
|
||||
|
@ -435,7 +432,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
|||
|
||||
void __init smp_prepare_boot_cpu(void)
|
||||
{
|
||||
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
|
||||
/*
|
||||
* Now that setup_per_cpu_areas() has allocated the runtime per-cpu
|
||||
* areas it is only safe to read the CPU0 boot-time area, and we must
|
||||
* reinitialize the offset to point to the runtime area.
|
||||
*/
|
||||
init_this_cpu_offset();
|
||||
|
||||
cpuinfo_store_boot_cpu();
|
||||
|
||||
/*
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
|
||||
|
||||
ifneq (,$(filter $(ARCH),aarch64 arm64))
|
||||
ARM64_SUBTARGETS ?= tags signal pauth fp
|
||||
ARM64_SUBTARGETS ?= tags signal pauth fp mte
|
||||
else
|
||||
ARM64_SUBTARGETS :=
|
||||
endif
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
check_buffer_fill
|
||||
check_tags_inclusion
|
||||
check_child_memory
|
||||
check_mmap_options
|
||||
check_ksm_options
|
||||
check_user_mem
|
|
@ -0,0 +1,29 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Copyright (C) 2020 ARM Limited
|
||||
|
||||
CFLAGS += -std=gnu99 -I.
|
||||
SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
|
||||
PROGS := $(patsubst %.c,%,$(SRCS))
|
||||
|
||||
#Add mte compiler option
|
||||
ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
|
||||
CFLAGS += -march=armv8.5-a+memtag
|
||||
endif
|
||||
|
||||
#check if the compiler works well
|
||||
mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
|
||||
|
||||
ifeq ($(mte_cc_support),1)
|
||||
# Generated binaries to be installed by top KSFT script
|
||||
TEST_GEN_PROGS := $(PROGS)
|
||||
|
||||
# Get Kernel headers installed and use them.
|
||||
KSFT_KHDR_INSTALL := 1
|
||||
endif
|
||||
|
||||
# Include KSFT lib.mk.
|
||||
include ../../lib.mk
|
||||
|
||||
ifeq ($(mte_cc_support),1)
|
||||
$(TEST_GEN_PROGS): mte_common_util.c mte_common_util.h mte_helper.S
|
||||
endif
|
|
@ -0,0 +1,475 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define OVERFLOW_RANGE MT_GRANULE_SIZE
|
||||
|
||||
static int sizes[] = {
|
||||
1, 555, 1033, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
|
||||
/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
|
||||
};
|
||||
|
||||
enum mte_block_test_alloc {
|
||||
UNTAGGED_TAGGED,
|
||||
TAGGED_UNTAGGED,
|
||||
TAGGED_TAGGED,
|
||||
BLOCK_ALLOC_MAX,
|
||||
};
|
||||
|
||||
static int check_buffer_by_byte(int mem_type, int mode)
|
||||
{
|
||||
char *ptr;
|
||||
int i, j, item;
|
||||
bool err;
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
for (i = 0; i < item; i++) {
|
||||
ptr = (char *)mte_allocate_memory(sizes[i], mem_type, 0, true);
|
||||
if (check_allocated_memory(ptr, sizes[i], mem_type, true) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i]);
|
||||
/* Set some value in tagged memory */
|
||||
for (j = 0; j < sizes[i]; j++)
|
||||
ptr[j] = '1';
|
||||
mte_wait_after_trig();
|
||||
err = cur_mte_cxt.fault_valid;
|
||||
/* Check the buffer whether it is filled. */
|
||||
for (j = 0; j < sizes[i] && !err; j++) {
|
||||
if (ptr[j] != '1')
|
||||
err = true;
|
||||
}
|
||||
mte_free_memory((void *)ptr, sizes[i], mem_type, true);
|
||||
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
if (!err)
|
||||
return KSFT_PASS;
|
||||
else
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
static int check_buffer_underflow_by_byte(int mem_type, int mode,
|
||||
int underflow_range)
|
||||
{
|
||||
char *ptr;
|
||||
int i, j, item, last_index;
|
||||
bool err;
|
||||
char *und_ptr = NULL;
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
for (i = 0; i < item; i++) {
|
||||
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
|
||||
underflow_range, 0);
|
||||
if (check_allocated_memory_range(ptr, sizes[i], mem_type,
|
||||
underflow_range, 0) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, -underflow_range);
|
||||
last_index = 0;
|
||||
/* Set some value in tagged memory and make the buffer underflow */
|
||||
for (j = sizes[i] - 1; (j >= -underflow_range) &&
|
||||
(cur_mte_cxt.fault_valid == false); j--) {
|
||||
ptr[j] = '1';
|
||||
last_index = j;
|
||||
}
|
||||
mte_wait_after_trig();
|
||||
err = false;
|
||||
/* Check whether the buffer is filled */
|
||||
for (j = 0; j < sizes[i]; j++) {
|
||||
if (ptr[j] != '1') {
|
||||
err = true;
|
||||
ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
|
||||
j, ptr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (err)
|
||||
goto check_buffer_underflow_by_byte_err;
|
||||
|
||||
switch (mode) {
|
||||
case MTE_NONE_ERR:
|
||||
if (cur_mte_cxt.fault_valid == true || last_index != -underflow_range) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/* There were no fault so the underflow area should be filled */
|
||||
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr - underflow_range);
|
||||
for (j = 0 ; j < underflow_range; j++) {
|
||||
if (und_ptr[j] != '1') {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case MTE_ASYNC_ERR:
|
||||
/* Imprecise fault should occur otherwise return error */
|
||||
if (cur_mte_cxt.fault_valid == false) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* The imprecise fault is checked after the write to the buffer,
|
||||
* so the underflow area before the fault should be filled.
|
||||
*/
|
||||
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
|
||||
for (j = last_index ; j < 0 ; j++) {
|
||||
if (und_ptr[j] != '1') {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case MTE_SYNC_ERR:
|
||||
/* Precise fault should occur otherwise return error */
|
||||
if (!cur_mte_cxt.fault_valid || (last_index != (-1))) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/* Underflow area should not be filled */
|
||||
und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
|
||||
if (und_ptr[-1] == '1')
|
||||
err = true;
|
||||
break;
|
||||
default:
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
check_buffer_underflow_by_byte_err:
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, underflow_range, 0);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
return (err ? KSFT_FAIL : KSFT_PASS);
|
||||
}
|
||||
|
||||
static int check_buffer_overflow_by_byte(int mem_type, int mode,
|
||||
int overflow_range)
|
||||
{
|
||||
char *ptr;
|
||||
int i, j, item, last_index;
|
||||
bool err;
|
||||
size_t tagged_size, overflow_size;
|
||||
char *over_ptr = NULL;
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
for (i = 0; i < item; i++) {
|
||||
ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
|
||||
0, overflow_range);
|
||||
if (check_allocated_memory_range(ptr, sizes[i], mem_type,
|
||||
0, overflow_range) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
tagged_size = MT_ALIGN_UP(sizes[i]);
|
||||
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i] + overflow_range);
|
||||
|
||||
/* Set some value in tagged memory and make the buffer underflow */
|
||||
for (j = 0, last_index = 0 ; (j < (sizes[i] + overflow_range)) &&
|
||||
(cur_mte_cxt.fault_valid == false); j++) {
|
||||
ptr[j] = '1';
|
||||
last_index = j;
|
||||
}
|
||||
mte_wait_after_trig();
|
||||
err = false;
|
||||
/* Check whether the buffer is filled */
|
||||
for (j = 0; j < sizes[i]; j++) {
|
||||
if (ptr[j] != '1') {
|
||||
err = true;
|
||||
ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
|
||||
j, ptr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (err)
|
||||
goto check_buffer_overflow_by_byte_err;
|
||||
|
||||
overflow_size = overflow_range - (tagged_size - sizes[i]);
|
||||
|
||||
switch (mode) {
|
||||
case MTE_NONE_ERR:
|
||||
if ((cur_mte_cxt.fault_valid == true) ||
|
||||
(last_index != (sizes[i] + overflow_range - 1))) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/* There were no fault so the overflow area should be filled */
|
||||
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
|
||||
for (j = 0 ; j < overflow_size; j++) {
|
||||
if (over_ptr[j] != '1') {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case MTE_ASYNC_ERR:
|
||||
/* Imprecise fault should occur otherwise return error */
|
||||
if (cur_mte_cxt.fault_valid == false) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* The imprecise fault is checked after the write to the buffer,
|
||||
* so the overflow area should be filled before the fault.
|
||||
*/
|
||||
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
|
||||
for (j = tagged_size ; j < last_index; j++) {
|
||||
if (over_ptr[j] != '1') {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case MTE_SYNC_ERR:
|
||||
/* Precise fault should occur otherwise return error */
|
||||
if (!cur_mte_cxt.fault_valid || (last_index != tagged_size)) {
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
/* Underflow area should not be filled */
|
||||
over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
|
||||
for (j = 0 ; j < overflow_size; j++) {
|
||||
if (over_ptr[j] == '1')
|
||||
err = true;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
err = true;
|
||||
break;
|
||||
}
|
||||
check_buffer_overflow_by_byte_err:
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, 0, overflow_range);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
return (err ? KSFT_FAIL : KSFT_PASS);
|
||||
}
|
||||
|
||||
static int check_buffer_by_block_iterate(int mem_type, int mode, size_t size)
|
||||
{
|
||||
char *src, *dst;
|
||||
int j, result = KSFT_PASS;
|
||||
enum mte_block_test_alloc alloc_type = UNTAGGED_TAGGED;
|
||||
|
||||
for (alloc_type = UNTAGGED_TAGGED; alloc_type < (int) BLOCK_ALLOC_MAX; alloc_type++) {
|
||||
switch (alloc_type) {
|
||||
case UNTAGGED_TAGGED:
|
||||
src = (char *)mte_allocate_memory(size, mem_type, 0, false);
|
||||
if (check_allocated_memory(src, size, mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
|
||||
if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
|
||||
mte_free_memory((void *)src, size, mem_type, false);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
break;
|
||||
case TAGGED_UNTAGGED:
|
||||
dst = (char *)mte_allocate_memory(size, mem_type, 0, false);
|
||||
if (check_allocated_memory(dst, size, mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
src = (char *)mte_allocate_memory(size, mem_type, 0, true);
|
||||
if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS) {
|
||||
mte_free_memory((void *)dst, size, mem_type, false);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
break;
|
||||
case TAGGED_TAGGED:
|
||||
src = (char *)mte_allocate_memory(size, mem_type, 0, true);
|
||||
if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
|
||||
if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
|
||||
mte_free_memory((void *)src, size, mem_type, true);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
cur_mte_cxt.fault_valid = false;
|
||||
result = KSFT_PASS;
|
||||
mte_initialize_current_context(mode, (uintptr_t)dst, size);
|
||||
/* Set some value in memory and copy*/
|
||||
memset((void *)src, (int)'1', size);
|
||||
memcpy((void *)dst, (void *)src, size);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid) {
|
||||
result = KSFT_FAIL;
|
||||
goto check_buffer_by_block_err;
|
||||
}
|
||||
/* Check the buffer whether it is filled. */
|
||||
for (j = 0; j < size; j++) {
|
||||
if (src[j] != dst[j] || src[j] != '1') {
|
||||
result = KSFT_FAIL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
check_buffer_by_block_err:
|
||||
mte_free_memory((void *)src, size, mem_type,
|
||||
MT_FETCH_TAG((uintptr_t)src) ? true : false);
|
||||
mte_free_memory((void *)dst, size, mem_type,
|
||||
MT_FETCH_TAG((uintptr_t)dst) ? true : false);
|
||||
if (result != KSFT_PASS)
|
||||
return result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static int check_buffer_by_block(int mem_type, int mode)
|
||||
{
|
||||
int i, item, result = KSFT_PASS;
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
cur_mte_cxt.fault_valid = false;
|
||||
for (i = 0; i < item; i++) {
|
||||
result = check_buffer_by_block_iterate(mem_type, mode, sizes[i]);
|
||||
if (result != KSFT_PASS)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static int compare_memory_tags(char *ptr, size_t size, int tag)
|
||||
{
|
||||
int i, new_tag;
|
||||
|
||||
for (i = 0 ; i < size ; i += MT_GRANULE_SIZE) {
|
||||
new_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
|
||||
if (tag != new_tag) {
|
||||
ksft_print_msg("FAIL: child mte tag mismatch\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_memory_initial_tags(int mem_type, int mode, int mapping)
|
||||
{
|
||||
char *ptr;
|
||||
int run, fd;
|
||||
int total = sizeof(sizes)/sizeof(int);
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < total; run++) {
|
||||
/* check initial tags for anonymous mmap */
|
||||
ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
|
||||
if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
|
||||
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
|
||||
|
||||
/* check initial tags for file mmap */
|
||||
fd = create_temp_file();
|
||||
if (fd == -1)
|
||||
return KSFT_FAIL;
|
||||
ptr = (char *)mte_allocate_file_memory(sizes[run], mem_type, mapping, false, fd);
|
||||
if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS) {
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
|
||||
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
mte_free_memory((void *)ptr, sizes[run], mem_type, false);
|
||||
close(fd);
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
size_t page_size = getpagesize();
|
||||
int item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
sizes[item - 3] = page_size - 1;
|
||||
sizes[item - 2] = page_size;
|
||||
sizes[item - 1] = page_size + 1;
|
||||
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Register SIGSEGV handler */
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
|
||||
/* Buffer by byte tests */
|
||||
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check buffer correctness by byte with sync err mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_ASYNC_ERR),
|
||||
"Check buffer correctness by byte with async err mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_SYNC_ERR),
|
||||
"Check buffer correctness by byte with sync err mode and mmap/mprotect memory\n");
|
||||
evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_ASYNC_ERR),
|
||||
"Check buffer correctness by byte with async err mode and mmap/mprotect memory\n");
|
||||
|
||||
/* Check buffer underflow with underflow size as 16 */
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write underflow by byte with sync mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write underflow by byte with async mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
|
||||
|
||||
/* Check buffer underflow with underflow size as page size */
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, page_size),
|
||||
"Check buffer write underflow by byte with sync mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, page_size),
|
||||
"Check buffer write underflow by byte with async mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, page_size),
|
||||
"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");
|
||||
|
||||
/* Check buffer overflow with overflow size as 16 */
|
||||
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write overflow by byte with sync mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write overflow by byte with async mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
|
||||
"Check buffer write overflow by byte with tag fault ignore mode and mmap memory\n");
|
||||
|
||||
/* Buffer by block tests */
|
||||
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check buffer write correctness by block with sync mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_ASYNC_ERR),
|
||||
"Check buffer write correctness by block with async mode and mmap memory\n");
|
||||
evaluate_test(check_buffer_by_block(USE_MMAP, MTE_NONE_ERR),
|
||||
"Check buffer write correctness by block with tag fault ignore and mmap memory\n");
|
||||
|
||||
/* Initial tags are supposed to be 0 */
|
||||
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check initial tags with private mapping, sync error mode and mmap memory\n");
|
||||
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check initial tags with private mapping, sync error mode and mmap/mprotect memory\n");
|
||||
evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check initial tags with shared mapping, sync error mode and mmap memory\n");
|
||||
evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check initial tags with shared mapping, sync error mode and mmap/mprotect memory\n");
|
||||
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,195 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ucontext.h>
|
||||
#include <sys/wait.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define BUFFER_SIZE (5 * MT_GRANULE_SIZE)
|
||||
#define RUNS (MT_TAG_COUNT)
|
||||
#define UNDERFLOW MT_GRANULE_SIZE
|
||||
#define OVERFLOW MT_GRANULE_SIZE
|
||||
|
||||
static size_t page_size;
|
||||
static int sizes[] = {
|
||||
1, 537, 989, 1269, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
|
||||
/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
|
||||
};
|
||||
|
||||
static int check_child_tag_inheritance(char *ptr, int size, int mode)
|
||||
{
|
||||
int i, parent_tag, child_tag, fault, child_status;
|
||||
pid_t child;
|
||||
|
||||
parent_tag = MT_FETCH_TAG((uintptr_t)ptr);
|
||||
fault = 0;
|
||||
|
||||
child = fork();
|
||||
if (child == -1) {
|
||||
ksft_print_msg("FAIL: child process creation\n");
|
||||
return KSFT_FAIL;
|
||||
} else if (child == 0) {
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
|
||||
/* Do copy on write */
|
||||
memset(ptr, '1', size);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == true) {
|
||||
fault = 1;
|
||||
goto check_child_tag_inheritance_err;
|
||||
}
|
||||
for (i = 0 ; i < size ; i += MT_GRANULE_SIZE) {
|
||||
child_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
|
||||
if (parent_tag != child_tag) {
|
||||
ksft_print_msg("FAIL: child mte tag mismatch\n");
|
||||
fault = 1;
|
||||
goto check_child_tag_inheritance_err;
|
||||
}
|
||||
}
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
|
||||
memset(ptr - UNDERFLOW, '2', UNDERFLOW);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == false) {
|
||||
fault = 1;
|
||||
goto check_child_tag_inheritance_err;
|
||||
}
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
|
||||
memset(ptr + size, '3', OVERFLOW);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == false) {
|
||||
fault = 1;
|
||||
goto check_child_tag_inheritance_err;
|
||||
}
|
||||
check_child_tag_inheritance_err:
|
||||
_exit(fault);
|
||||
}
|
||||
/* Wait for child process to terminate */
|
||||
wait(&child_status);
|
||||
if (WIFEXITED(child_status))
|
||||
fault = WEXITSTATUS(child_status);
|
||||
else
|
||||
fault = 1;
|
||||
return (fault) ? KSFT_FAIL : KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_child_memory_mapping(int mem_type, int mode, int mapping)
|
||||
{
|
||||
char *ptr;
|
||||
int run, result;
|
||||
int item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < item; run++) {
|
||||
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
|
||||
UNDERFLOW, OVERFLOW);
|
||||
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
|
||||
UNDERFLOW, OVERFLOW) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
result = check_child_tag_inheritance(ptr, sizes[run], mode);
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
|
||||
if (result == KSFT_FAIL)
|
||||
return result;
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_child_file_mapping(int mem_type, int mode, int mapping)
|
||||
{
|
||||
char *ptr, *map_ptr;
|
||||
int run, fd, map_size, result = KSFT_PASS;
|
||||
int total = sizeof(sizes)/sizeof(int);
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < total; run++) {
|
||||
fd = create_temp_file();
|
||||
if (fd == -1)
|
||||
return KSFT_FAIL;
|
||||
|
||||
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
|
||||
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
|
||||
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS) {
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
ptr = map_ptr + UNDERFLOW;
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
|
||||
/* Only mte enabled memory will allow tag insertion */
|
||||
ptr = mte_insert_tags((void *)ptr, sizes[run]);
|
||||
if (!ptr || cur_mte_cxt.fault_valid == true) {
|
||||
ksft_print_msg("FAIL: Insert tags on file based memory\n");
|
||||
munmap((void *)map_ptr, map_size);
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
result = check_child_tag_inheritance(ptr, sizes[run], mode);
|
||||
mte_clear_tags((void *)ptr, sizes[run]);
|
||||
munmap((void *)map_ptr, map_size);
|
||||
close(fd);
|
||||
if (result != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
int item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
page_size = getpagesize();
|
||||
if (!page_size) {
|
||||
ksft_print_msg("ERR: Unable to get page size\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
sizes[item - 3] = page_size - 1;
|
||||
sizes[item - 2] = page_size;
|
||||
sizes[item - 1] = page_size + 1;
|
||||
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Register SIGSEGV handler */
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
mte_register_signal(SIGBUS, mte_default_handler);
|
||||
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check child anonymous memory with private mapping, precise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check child anonymous memory with shared mapping, precise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
|
||||
"Check child anonymous memory with private mapping, imprecise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
|
||||
"Check child anonymous memory with shared mapping, imprecise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check child anonymous memory with private mapping, precise mode and mmap/mprotect memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check child anonymous memory with shared mapping, precise mode and mmap/mprotect memory\n");
|
||||
|
||||
evaluate_test(check_child_file_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check child file memory with private mapping, precise mode and mmap memory\n");
|
||||
evaluate_test(check_child_file_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check child file memory with shared mapping, precise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
|
||||
"Check child file memory with private mapping, imprecise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
|
||||
"Check child file memory with shared mapping, imprecise mode and mmap memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check child file memory with private mapping, precise mode and mmap/mprotect memory\n");
|
||||
evaluate_test(check_child_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check child file memory with shared mapping, precise mode and mmap/mprotect memory\n");
|
||||
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ucontext.h>
|
||||
#include <sys/mman.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define TEST_UNIT 10
|
||||
#define PATH_KSM "/sys/kernel/mm/ksm/"
|
||||
#define MAX_LOOP 4
|
||||
|
||||
static size_t page_sz;
|
||||
static unsigned long ksm_sysfs[5];
|
||||
|
||||
static unsigned long read_sysfs(char *str)
|
||||
{
|
||||
FILE *f;
|
||||
unsigned long val = 0;
|
||||
|
||||
f = fopen(str, "r");
|
||||
if (!f) {
|
||||
ksft_print_msg("ERR: missing %s\n", str);
|
||||
return 0;
|
||||
}
|
||||
fscanf(f, "%lu", &val);
|
||||
fclose(f);
|
||||
return val;
|
||||
}
|
||||
|
||||
static void write_sysfs(char *str, unsigned long val)
|
||||
{
|
||||
FILE *f;
|
||||
|
||||
f = fopen(str, "w");
|
||||
if (!f) {
|
||||
ksft_print_msg("ERR: missing %s\n", str);
|
||||
return;
|
||||
}
|
||||
fprintf(f, "%lu", val);
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static void mte_ksm_setup(void)
|
||||
{
|
||||
ksm_sysfs[0] = read_sysfs(PATH_KSM "merge_across_nodes");
|
||||
write_sysfs(PATH_KSM "merge_across_nodes", 1);
|
||||
ksm_sysfs[1] = read_sysfs(PATH_KSM "sleep_millisecs");
|
||||
write_sysfs(PATH_KSM "sleep_millisecs", 0);
|
||||
ksm_sysfs[2] = read_sysfs(PATH_KSM "run");
|
||||
write_sysfs(PATH_KSM "run", 1);
|
||||
ksm_sysfs[3] = read_sysfs(PATH_KSM "max_page_sharing");
|
||||
write_sysfs(PATH_KSM "max_page_sharing", ksm_sysfs[3] + TEST_UNIT);
|
||||
ksm_sysfs[4] = read_sysfs(PATH_KSM "pages_to_scan");
|
||||
write_sysfs(PATH_KSM "pages_to_scan", ksm_sysfs[4] + TEST_UNIT);
|
||||
}
|
||||
|
||||
static void mte_ksm_restore(void)
|
||||
{
|
||||
write_sysfs(PATH_KSM "merge_across_nodes", ksm_sysfs[0]);
|
||||
write_sysfs(PATH_KSM "sleep_millisecs", ksm_sysfs[1]);
|
||||
write_sysfs(PATH_KSM "run", ksm_sysfs[2]);
|
||||
write_sysfs(PATH_KSM "max_page_sharing", ksm_sysfs[3]);
|
||||
write_sysfs(PATH_KSM "pages_to_scan", ksm_sysfs[4]);
|
||||
}
|
||||
|
||||
static void mte_ksm_scan(void)
|
||||
{
|
||||
int cur_count = read_sysfs(PATH_KSM "full_scans");
|
||||
int scan_count = cur_count + 1;
|
||||
int max_loop_count = MAX_LOOP;
|
||||
|
||||
while ((cur_count < scan_count) && max_loop_count) {
|
||||
sleep(1);
|
||||
cur_count = read_sysfs(PATH_KSM "full_scans");
|
||||
max_loop_count--;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
ksft_print_msg("INFO: pages_shared=%lu pages_sharing=%lu\n",
|
||||
read_sysfs(PATH_KSM "pages_shared"),
|
||||
read_sysfs(PATH_KSM "pages_sharing"));
|
||||
#endif
|
||||
}
|
||||
|
||||
static int check_madvise_options(int mem_type, int mode, int mapping)
|
||||
{
|
||||
char *ptr;
|
||||
int err, ret;
|
||||
|
||||
err = KSFT_FAIL;
|
||||
if (access(PATH_KSM, F_OK) == -1) {
|
||||
ksft_print_msg("ERR: Kernel KSM config not enabled\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
ptr = mte_allocate_memory(TEST_UNIT * page_sz, mem_type, mapping, true);
|
||||
if (check_allocated_memory(ptr, TEST_UNIT * page_sz, mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
/* Insert same data in all the pages */
|
||||
memset(ptr, 'A', TEST_UNIT * page_sz);
|
||||
ret = madvise(ptr, TEST_UNIT * page_sz, MADV_MERGEABLE);
|
||||
if (ret) {
|
||||
ksft_print_msg("ERR: madvise failed to set MADV_UNMERGEABLE\n");
|
||||
goto madvise_err;
|
||||
}
|
||||
mte_ksm_scan();
|
||||
/* Tagged pages should not merge */
|
||||
if ((read_sysfs(PATH_KSM "pages_shared") < 1) ||
|
||||
(read_sysfs(PATH_KSM "pages_sharing") < (TEST_UNIT - 1)))
|
||||
err = KSFT_PASS;
|
||||
madvise_err:
|
||||
mte_free_memory(ptr, TEST_UNIT * page_sz, mem_type, true);
|
||||
return err;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
page_sz = getpagesize();
|
||||
if (!page_sz) {
|
||||
ksft_print_msg("ERR: Unable to get page size\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
/* Register signal handlers */
|
||||
mte_register_signal(SIGBUS, mte_default_handler);
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
/* Enable KSM */
|
||||
mte_ksm_setup();
|
||||
|
||||
evaluate_test(check_madvise_options(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check KSM mte page merge for private mapping, sync mode and mmap memory\n");
|
||||
evaluate_test(check_madvise_options(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
|
||||
"Check KSM mte page merge for private mapping, async mode and mmap memory\n");
|
||||
evaluate_test(check_madvise_options(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check KSM mte page merge for shared mapping, sync mode and mmap memory\n");
|
||||
evaluate_test(check_madvise_options(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
|
||||
"Check KSM mte page merge for shared mapping, async mode and mmap memory\n");
|
||||
|
||||
mte_ksm_restore();
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,262 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ucontext.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define RUNS (MT_TAG_COUNT)
|
||||
#define UNDERFLOW MT_GRANULE_SIZE
|
||||
#define OVERFLOW MT_GRANULE_SIZE
|
||||
#define TAG_CHECK_ON 0
|
||||
#define TAG_CHECK_OFF 1
|
||||
|
||||
static size_t page_size;
|
||||
static int sizes[] = {
|
||||
1, 537, 989, 1269, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
|
||||
/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
|
||||
};
|
||||
|
||||
static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
|
||||
{
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, size);
|
||||
memset(ptr, '1', size);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == true)
|
||||
return KSFT_FAIL;
|
||||
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, -UNDERFLOW);
|
||||
memset(ptr - UNDERFLOW, '2', UNDERFLOW);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == false && tag_check == TAG_CHECK_ON)
|
||||
return KSFT_FAIL;
|
||||
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
|
||||
return KSFT_FAIL;
|
||||
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, size + OVERFLOW);
|
||||
memset(ptr + size, '3', OVERFLOW);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid == false && tag_check == TAG_CHECK_ON)
|
||||
return KSFT_FAIL;
|
||||
if (cur_mte_cxt.fault_valid == true && tag_check == TAG_CHECK_OFF)
|
||||
return KSFT_FAIL;
|
||||
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
|
||||
{
|
||||
char *ptr, *map_ptr;
|
||||
int run, result, map_size;
|
||||
int item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
item = sizeof(sizes)/sizeof(int);
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < item; run++) {
|
||||
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
|
||||
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
|
||||
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
ptr = map_ptr + UNDERFLOW;
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
|
||||
/* Only mte enabled memory will allow tag insertion */
|
||||
ptr = mte_insert_tags((void *)ptr, sizes[run]);
|
||||
if (!ptr || cur_mte_cxt.fault_valid == true) {
|
||||
ksft_print_msg("FAIL: Insert tags on anonymous mmap memory\n");
|
||||
munmap((void *)map_ptr, map_size);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
result = check_mte_memory(ptr, sizes[run], mode, tag_check);
|
||||
mte_clear_tags((void *)ptr, sizes[run]);
|
||||
mte_free_memory((void *)map_ptr, map_size, mem_type, false);
|
||||
if (result == KSFT_FAIL)
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
|
||||
{
|
||||
char *ptr, *map_ptr;
|
||||
int run, fd, map_size;
|
||||
int total = sizeof(sizes)/sizeof(int);
|
||||
int result = KSFT_PASS;
|
||||
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < total; run++) {
|
||||
fd = create_temp_file();
|
||||
if (fd == -1)
|
||||
return KSFT_FAIL;
|
||||
|
||||
map_size = sizes[run] + UNDERFLOW + OVERFLOW;
|
||||
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
|
||||
if (check_allocated_memory(map_ptr, map_size, mem_type, false) != KSFT_PASS) {
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
ptr = map_ptr + UNDERFLOW;
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);
|
||||
/* Only mte enabled memory will allow tag insertion */
|
||||
ptr = mte_insert_tags((void *)ptr, sizes[run]);
|
||||
if (!ptr || cur_mte_cxt.fault_valid == true) {
|
||||
ksft_print_msg("FAIL: Insert tags on file based memory\n");
|
||||
munmap((void *)map_ptr, map_size);
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
result = check_mte_memory(ptr, sizes[run], mode, tag_check);
|
||||
mte_clear_tags((void *)ptr, sizes[run]);
|
||||
munmap((void *)map_ptr, map_size);
|
||||
close(fd);
|
||||
if (result == KSFT_FAIL)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
|
||||
{
|
||||
char *ptr, *map_ptr;
|
||||
int run, prot_flag, result, fd, map_size;
|
||||
int total = sizeof(sizes)/sizeof(int);
|
||||
|
||||
prot_flag = PROT_READ | PROT_WRITE;
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
for (run = 0; run < total; run++) {
|
||||
map_size = sizes[run] + OVERFLOW + UNDERFLOW;
|
||||
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
|
||||
UNDERFLOW, OVERFLOW);
|
||||
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
|
||||
UNDERFLOW, OVERFLOW) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
map_ptr = ptr - UNDERFLOW;
|
||||
/* Try to clear PROT_MTE property and verify it by tag checking */
|
||||
if (mprotect(map_ptr, map_size, prot_flag)) {
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type,
|
||||
UNDERFLOW, OVERFLOW);
|
||||
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
|
||||
if (result != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
fd = create_temp_file();
|
||||
if (fd == -1)
|
||||
return KSFT_FAIL;
|
||||
ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,
|
||||
UNDERFLOW, OVERFLOW, fd);
|
||||
if (check_allocated_memory_range(ptr, sizes[run], mem_type,
|
||||
UNDERFLOW, OVERFLOW) != KSFT_PASS) {
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
map_ptr = ptr - UNDERFLOW;
|
||||
/* Try to clear PROT_MTE property and verify it by tag checking */
|
||||
if (mprotect(map_ptr, map_size, prot_flag)) {
|
||||
ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type,
|
||||
UNDERFLOW, OVERFLOW);
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
|
||||
mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
|
||||
close(fd);
|
||||
if (result != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
int item = sizeof(sizes)/sizeof(int);
|
||||
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
page_size = getpagesize();
|
||||
if (!page_size) {
|
||||
ksft_print_msg("ERR: Unable to get page size\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
sizes[item - 3] = page_size - 1;
|
||||
sizes[item - 2] = page_size;
|
||||
sizes[item - 1] = page_size + 1;
|
||||
|
||||
/* Register signal handlers */
|
||||
mte_register_signal(SIGBUS, mte_default_handler);
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
|
||||
mte_enable_pstate_tco();
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
|
||||
"Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
|
||||
"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n");
|
||||
|
||||
mte_disable_pstate_tco();
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
|
||||
"Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
|
||||
"Check file memory with private mapping, no error mode, mmap/mprotect memory and tag check off\n");
|
||||
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check anonymous memory with private mapping, sync error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check anonymous memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check anonymous memory with shared mapping, sync error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check anonymous memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check anonymous memory with private mapping, async error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check anonymous memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check anonymous memory with shared mapping, async error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check anonymous memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
|
||||
|
||||
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check file memory with private mapping, sync error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check file memory with shared mapping, sync error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check file memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check file memory with private mapping, async error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
|
||||
"Check file memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check file memory with shared mapping, async error mode, mmap memory and tag check on\n");
|
||||
evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
|
||||
"Check file memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
|
||||
|
||||
evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
|
||||
evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
|
||||
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,185 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <ucontext.h>
|
||||
#include <sys/wait.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define BUFFER_SIZE (5 * MT_GRANULE_SIZE)
|
||||
#define RUNS (MT_TAG_COUNT * 2)
|
||||
#define MTE_LAST_TAG_MASK (0x7FFF)
|
||||
|
||||
static int verify_mte_pointer_validity(char *ptr, int mode)
|
||||
{
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
|
||||
/* Check the validity of the tagged pointer */
|
||||
memset((void *)ptr, '1', BUFFER_SIZE);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid)
|
||||
return KSFT_FAIL;
|
||||
/* Proceed further for nonzero tags */
|
||||
if (!MT_FETCH_TAG((uintptr_t)ptr))
|
||||
return KSFT_PASS;
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE + 1);
|
||||
/* Check the validity outside the range */
|
||||
ptr[BUFFER_SIZE] = '2';
|
||||
mte_wait_after_trig();
|
||||
if (!cur_mte_cxt.fault_valid)
|
||||
return KSFT_FAIL;
|
||||
else
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static int check_single_included_tags(int mem_type, int mode)
|
||||
{
|
||||
char *ptr;
|
||||
int tag, run, result = KSFT_PASS;
|
||||
|
||||
ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
|
||||
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
|
||||
mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
for (tag = 0; (tag < MT_TAG_COUNT) && (result == KSFT_PASS); tag++) {
|
||||
mte_switch_mode(mode, MT_INCLUDE_VALID_TAG(tag));
|
||||
/* Try to catch a excluded tag by a number of tries. */
|
||||
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
|
||||
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
|
||||
/* Check tag value */
|
||||
if (MT_FETCH_TAG((uintptr_t)ptr) == tag) {
|
||||
ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
|
||||
MT_FETCH_TAG((uintptr_t)ptr),
|
||||
MT_INCLUDE_VALID_TAG(tag));
|
||||
result = KSFT_FAIL;
|
||||
break;
|
||||
}
|
||||
result = verify_mte_pointer_validity(ptr, mode);
|
||||
}
|
||||
}
|
||||
mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
|
||||
return result;
|
||||
}
|
||||
|
||||
static int check_multiple_included_tags(int mem_type, int mode)
|
||||
{
|
||||
char *ptr;
|
||||
int tag, run, result = KSFT_PASS;
|
||||
unsigned long excl_mask = 0;
|
||||
|
||||
ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
|
||||
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
|
||||
mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
for (tag = 0; (tag < MT_TAG_COUNT - 1) && (result == KSFT_PASS); tag++) {
|
||||
excl_mask |= 1 << tag;
|
||||
mte_switch_mode(mode, MT_INCLUDE_VALID_TAGS(excl_mask));
|
||||
/* Try to catch a excluded tag by a number of tries. */
|
||||
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
|
||||
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
|
||||
/* Check tag value */
|
||||
if (MT_FETCH_TAG((uintptr_t)ptr) < tag) {
|
||||
ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n",
|
||||
MT_FETCH_TAG((uintptr_t)ptr),
|
||||
MT_INCLUDE_VALID_TAGS(excl_mask));
|
||||
result = KSFT_FAIL;
|
||||
break;
|
||||
}
|
||||
result = verify_mte_pointer_validity(ptr, mode);
|
||||
}
|
||||
}
|
||||
mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
|
||||
return result;
|
||||
}
|
||||
|
||||
static int check_all_included_tags(int mem_type, int mode)
|
||||
{
|
||||
char *ptr;
|
||||
int run, result = KSFT_PASS;
|
||||
|
||||
ptr = (char *)mte_allocate_memory(BUFFER_SIZE + MT_GRANULE_SIZE, mem_type, 0, false);
|
||||
if (check_allocated_memory(ptr, BUFFER_SIZE + MT_GRANULE_SIZE,
|
||||
mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
mte_switch_mode(mode, MT_INCLUDE_TAG_MASK);
|
||||
/* Try to catch a excluded tag by a number of tries. */
|
||||
for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {
|
||||
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
|
||||
/*
|
||||
* Here tag byte can be between 0x0 to 0xF (full allowed range)
|
||||
* so no need to match so just verify if it is writable.
|
||||
*/
|
||||
result = verify_mte_pointer_validity(ptr, mode);
|
||||
}
|
||||
mte_free_memory_tag_range((void *)ptr, BUFFER_SIZE, mem_type, 0, MT_GRANULE_SIZE);
|
||||
return result;
|
||||
}
|
||||
|
||||
static int check_none_included_tags(int mem_type, int mode)
|
||||
{
|
||||
char *ptr;
|
||||
int run;
|
||||
|
||||
ptr = (char *)mte_allocate_memory(BUFFER_SIZE, mem_type, 0, false);
|
||||
if (check_allocated_memory(ptr, BUFFER_SIZE, mem_type, false) != KSFT_PASS)
|
||||
return KSFT_FAIL;
|
||||
|
||||
mte_switch_mode(mode, MT_EXCLUDE_TAG_MASK);
|
||||
/* Try to catch a excluded tag by a number of tries. */
|
||||
for (run = 0; run < RUNS; run++) {
|
||||
ptr = (char *)mte_insert_tags(ptr, BUFFER_SIZE);
|
||||
/* Here all tags exluded so tag value generated should be 0 */
|
||||
if (MT_FETCH_TAG((uintptr_t)ptr)) {
|
||||
ksft_print_msg("FAIL: included tag value found\n");
|
||||
mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, true);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, BUFFER_SIZE);
|
||||
/* Check the write validity of the untagged pointer */
|
||||
memset((void *)ptr, '1', BUFFER_SIZE);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid)
|
||||
break;
|
||||
}
|
||||
mte_free_memory((void *)ptr, BUFFER_SIZE, mem_type, false);
|
||||
if (cur_mte_cxt.fault_valid)
|
||||
return KSFT_FAIL;
|
||||
else
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Register SIGSEGV handler */
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
|
||||
evaluate_test(check_single_included_tags(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check an included tag value with sync mode\n");
|
||||
evaluate_test(check_multiple_included_tags(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check different included tags value with sync mode\n");
|
||||
evaluate_test(check_none_included_tags(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check none included tags value with sync mode\n");
|
||||
evaluate_test(check_all_included_tags(USE_MMAP, MTE_SYNC_ERR),
|
||||
"Check all included tags value with sync mode\n");
|
||||
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,111 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <signal.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <ucontext.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/mman.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
static size_t page_sz;
|
||||
|
||||
static int check_usermem_access_fault(int mem_type, int mode, int mapping)
|
||||
{
|
||||
int fd, i, err;
|
||||
char val = 'A';
|
||||
size_t len, read_len;
|
||||
void *ptr, *ptr_next;
|
||||
|
||||
err = KSFT_FAIL;
|
||||
len = 2 * page_sz;
|
||||
mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
fd = create_temp_file();
|
||||
if (fd == -1)
|
||||
return KSFT_FAIL;
|
||||
for (i = 0; i < len; i++)
|
||||
write(fd, &val, sizeof(val));
|
||||
lseek(fd, 0, 0);
|
||||
ptr = mte_allocate_memory(len, mem_type, mapping, true);
|
||||
if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
|
||||
close(fd);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
mte_initialize_current_context(mode, (uintptr_t)ptr, len);
|
||||
/* Copy from file into buffer with valid tag */
|
||||
read_len = read(fd, ptr, len);
|
||||
mte_wait_after_trig();
|
||||
if (cur_mte_cxt.fault_valid || read_len < len)
|
||||
goto usermem_acc_err;
|
||||
/* Verify same pattern is read */
|
||||
for (i = 0; i < len; i++)
|
||||
if (*(char *)(ptr + i) != val)
|
||||
break;
|
||||
if (i < len)
|
||||
goto usermem_acc_err;
|
||||
|
||||
/* Tag the next half of memory with different value */
|
||||
ptr_next = (void *)((unsigned long)ptr + page_sz);
|
||||
ptr_next = mte_insert_new_tag(ptr_next);
|
||||
mte_set_tag_address_range(ptr_next, page_sz);
|
||||
|
||||
lseek(fd, 0, 0);
|
||||
/* Copy from file into buffer with invalid tag */
|
||||
read_len = read(fd, ptr, len);
|
||||
mte_wait_after_trig();
|
||||
/*
|
||||
* Accessing user memory in kernel with invalid tag should fail in sync
|
||||
* mode without fault but may not fail in async mode as per the
|
||||
* implemented MTE userspace support in Arm64 kernel.
|
||||
*/
|
||||
if (mode == MTE_SYNC_ERR &&
|
||||
!cur_mte_cxt.fault_valid && read_len < len) {
|
||||
err = KSFT_PASS;
|
||||
} else if (mode == MTE_ASYNC_ERR &&
|
||||
!cur_mte_cxt.fault_valid && read_len == len) {
|
||||
err = KSFT_PASS;
|
||||
}
|
||||
usermem_acc_err:
|
||||
mte_free_memory((void *)ptr, len, mem_type, true);
|
||||
close(fd);
|
||||
return err;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
int err;
|
||||
|
||||
page_sz = getpagesize();
|
||||
if (!page_sz) {
|
||||
ksft_print_msg("ERR: Unable to get page size\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
err = mte_default_setup();
|
||||
if (err)
|
||||
return err;
|
||||
/* Register signal handlers */
|
||||
mte_register_signal(SIGSEGV, mte_default_handler);
|
||||
|
||||
evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
|
||||
"Check memory access from kernel in sync mode, private mapping and mmap memory\n");
|
||||
evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
|
||||
"Check memory access from kernel in sync mode, shared mapping and mmap memory\n");
|
||||
|
||||
evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
|
||||
"Check memory access from kernel in async mode, private mapping and mmap memory\n");
|
||||
evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
|
||||
"Check memory access from kernel in async mode, shared mapping and mmap memory\n");
|
||||
|
||||
mte_restore_setup();
|
||||
ksft_print_cnts();
|
||||
return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
|
||||
}
|
|
@ -0,0 +1,341 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2020 ARM Limited
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <sched.h>
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <linux/auxvec.h>
|
||||
#include <sys/auxv.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/prctl.h>
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
|
||||
#include "kselftest.h"
|
||||
#include "mte_common_util.h"
|
||||
#include "mte_def.h"
|
||||
|
||||
#define INIT_BUFFER_SIZE 256
|
||||
|
||||
struct mte_fault_cxt cur_mte_cxt;
|
||||
static unsigned int mte_cur_mode;
|
||||
static unsigned int mte_cur_pstate_tco;
|
||||
|
||||
void mte_default_handler(int signum, siginfo_t *si, void *uc)
|
||||
{
|
||||
unsigned long addr = (unsigned long)si->si_addr;
|
||||
|
||||
if (signum == SIGSEGV) {
|
||||
#ifdef DEBUG
|
||||
ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
|
||||
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
|
||||
#endif
|
||||
if (si->si_code == SEGV_MTEAERR) {
|
||||
if (cur_mte_cxt.trig_si_code == si->si_code)
|
||||
cur_mte_cxt.fault_valid = true;
|
||||
return;
|
||||
}
|
||||
/* Compare the context for precise error */
|
||||
else if (si->si_code == SEGV_MTESERR) {
|
||||
if (cur_mte_cxt.trig_si_code == si->si_code &&
|
||||
((cur_mte_cxt.trig_range >= 0 &&
|
||||
addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
|
||||
addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
|
||||
(cur_mte_cxt.trig_range < 0 &&
|
||||
addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
|
||||
addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
|
||||
cur_mte_cxt.fault_valid = true;
|
||||
/* Adjust the pc by 4 */
|
||||
((ucontext_t *)uc)->uc_mcontext.pc += 4;
|
||||
} else {
|
||||
ksft_print_msg("Invalid MTE synchronous exception caught!\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
ksft_print_msg("Unknown SIGSEGV exception caught!\n");
|
||||
exit(1);
|
||||
}
|
||||
} else if (signum == SIGBUS) {
|
||||
ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
|
||||
((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
|
||||
if ((cur_mte_cxt.trig_range >= 0 &&
|
||||
addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
|
||||
addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
|
||||
(cur_mte_cxt.trig_range < 0 &&
|
||||
addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
|
||||
addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
|
||||
cur_mte_cxt.fault_valid = true;
|
||||
/* Adjust the pc by 4 */
|
||||
((ucontext_t *)uc)->uc_mcontext.pc += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
|
||||
{
|
||||
struct sigaction sa;
|
||||
|
||||
sa.sa_sigaction = handler;
|
||||
sa.sa_flags = SA_SIGINFO;
|
||||
sigemptyset(&sa.sa_mask);
|
||||
sigaction(signal, &sa, NULL);
|
||||
}
|
||||
|
||||
void mte_wait_after_trig(void)
|
||||
{
|
||||
sched_yield();
|
||||
}
|
||||
|
||||
void *mte_insert_tags(void *ptr, size_t size)
|
||||
{
|
||||
void *tag_ptr;
|
||||
int align_size;
|
||||
|
||||
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
|
||||
ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
|
||||
return NULL;
|
||||
}
|
||||
align_size = MT_ALIGN_UP(size);
|
||||
tag_ptr = mte_insert_random_tag(ptr);
|
||||
mte_set_tag_address_range(tag_ptr, align_size);
|
||||
return tag_ptr;
|
||||
}
|
||||
|
||||
void mte_clear_tags(void *ptr, size_t size)
|
||||
{
|
||||
if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
|
||||
ksft_print_msg("FAIL: Addr=%lx: invalid\n", ptr);
|
||||
return;
|
||||
}
|
||||
size = MT_ALIGN_UP(size);
|
||||
ptr = (void *)MT_CLEAR_TAG((unsigned long)ptr);
|
||||
mte_clear_tag_address_range(ptr, size);
|
||||
}
|
||||
|
||||
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
|
||||
size_t range_before, size_t range_after,
|
||||
bool tags, int fd)
|
||||
{
|
||||
void *ptr;
|
||||
int prot_flag, map_flag;
|
||||
size_t entire_size = size + range_before + range_after;
|
||||
|
||||
if (mem_type != USE_MALLOC && mem_type != USE_MMAP &&
|
||||
mem_type != USE_MPROTECT) {
|
||||
ksft_print_msg("FAIL: Invalid allocate request\n");
|
||||
return NULL;
|
||||
}
|
||||
if (mem_type == USE_MALLOC)
|
||||
return malloc(entire_size) + range_before;
|
||||
|
||||
prot_flag = PROT_READ | PROT_WRITE;
|
||||
if (mem_type == USE_MMAP)
|
||||
prot_flag |= PROT_MTE;
|
||||
|
||||
map_flag = mapping;
|
||||
if (fd == -1)
|
||||
map_flag = MAP_ANONYMOUS | map_flag;
|
||||
if (!(mapping & MAP_SHARED))
|
||||
map_flag |= MAP_PRIVATE;
|
||||
ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
|
||||
if (ptr == MAP_FAILED) {
|
||||
ksft_print_msg("FAIL: mmap allocation\n");
|
||||
return NULL;
|
||||
}
|
||||
if (mem_type == USE_MPROTECT) {
|
||||
if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
|
||||
munmap(ptr, size);
|
||||
ksft_print_msg("FAIL: mprotect PROT_MTE property\n");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (tags)
|
||||
ptr = mte_insert_tags(ptr + range_before, size);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
|
||||
size_t range_before, size_t range_after)
|
||||
{
|
||||
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
|
||||
range_after, true, -1);
|
||||
}
|
||||
|
||||
/*
 * Allocate @size bytes with no guard ranges, backed by an anonymous mapping;
 * @tags selects whether a tagged pointer is returned.
 */
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
{
	void *mem;

	mem = __mte_allocate_memory_range(size, mem_type, mapping, 0, 0,
					  tags, -1);
	return mem;
}
|
||||
|
||||
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
|
||||
{
|
||||
int index;
|
||||
char buffer[INIT_BUFFER_SIZE];
|
||||
|
||||
if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
|
||||
ksft_print_msg("FAIL: Invalid mmap file request\n");
|
||||
return NULL;
|
||||
}
|
||||
/* Initialize the file for mappable size */
|
||||
lseek(fd, 0, SEEK_SET);
|
||||
for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE)
|
||||
write(fd, buffer, INIT_BUFFER_SIZE);
|
||||
index -= INIT_BUFFER_SIZE;
|
||||
write(fd, buffer, size - index);
|
||||
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
|
||||
}
|
||||
|
||||
void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
|
||||
size_t range_before, size_t range_after, int fd)
|
||||
{
|
||||
int index;
|
||||
char buffer[INIT_BUFFER_SIZE];
|
||||
int map_size = size + range_before + range_after;
|
||||
|
||||
if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
|
||||
ksft_print_msg("FAIL: Invalid mmap file request\n");
|
||||
return NULL;
|
||||
}
|
||||
/* Initialize the file for mappable size */
|
||||
lseek(fd, 0, SEEK_SET);
|
||||
for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
|
||||
write(fd, buffer, INIT_BUFFER_SIZE);
|
||||
index -= INIT_BUFFER_SIZE;
|
||||
write(fd, buffer, map_size - index);
|
||||
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
|
||||
range_after, true, fd);
|
||||
}
|
||||
|
||||
static void __mte_free_memory_range(void *ptr, size_t size, int mem_type,
|
||||
size_t range_before, size_t range_after, bool tags)
|
||||
{
|
||||
switch (mem_type) {
|
||||
case USE_MALLOC:
|
||||
free(ptr - range_before);
|
||||
break;
|
||||
case USE_MMAP:
|
||||
case USE_MPROTECT:
|
||||
if (tags)
|
||||
mte_clear_tags(ptr, size);
|
||||
munmap(ptr - range_before, size + range_before + range_after);
|
||||
break;
|
||||
default:
|
||||
ksft_print_msg("FAIL: Invalid free request\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
|
||||
size_t range_before, size_t range_after)
|
||||
{
|
||||
__mte_free_memory_range(ptr, size, mem_type, range_before, range_after, true);
|
||||
}
|
||||
|
||||
/* Free a buffer allocated without guard ranges; @tags selects tag clearing. */
void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)
{
	__mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);
}
|
||||
|
||||
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
|
||||
{
|
||||
cur_mte_cxt.fault_valid = false;
|
||||
cur_mte_cxt.trig_addr = ptr;
|
||||
cur_mte_cxt.trig_range = range;
|
||||
if (mode == MTE_SYNC_ERR)
|
||||
cur_mte_cxt.trig_si_code = SEGV_MTESERR;
|
||||
else if (mode == MTE_ASYNC_ERR)
|
||||
cur_mte_cxt.trig_si_code = SEGV_MTEAERR;
|
||||
else
|
||||
cur_mte_cxt.trig_si_code = 0;
|
||||
}
|
||||
|
||||
int mte_switch_mode(int mte_option, unsigned long incl_mask)
|
||||
{
|
||||
unsigned long en = 0;
|
||||
|
||||
if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR ||
|
||||
mte_option == MTE_NONE_ERR || incl_mask <= MTE_ALLOW_NON_ZERO_TAG)) {
|
||||
ksft_print_msg("FAIL: Invalid mte config option\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
en = PR_TAGGED_ADDR_ENABLE;
|
||||
if (mte_option == MTE_SYNC_ERR)
|
||||
en |= PR_MTE_TCF_SYNC;
|
||||
else if (mte_option == MTE_ASYNC_ERR)
|
||||
en |= PR_MTE_TCF_ASYNC;
|
||||
else if (mte_option == MTE_NONE_ERR)
|
||||
en |= PR_MTE_TCF_NONE;
|
||||
|
||||
en |= (incl_mask << PR_MTE_TAG_SHIFT);
|
||||
/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
|
||||
if (!prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) == 0) {
|
||||
ksft_print_msg("FAIL:prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define ID_AA64PFR1_MTE_SHIFT 8
|
||||
#define ID_AA64PFR1_MTE 2
|
||||
|
||||
int mte_default_setup(void)
|
||||
{
|
||||
unsigned long hwcaps = getauxval(AT_HWCAP);
|
||||
unsigned long en = 0;
|
||||
int ret;
|
||||
|
||||
if (!(hwcaps & HWCAP_CPUID)) {
|
||||
ksft_print_msg("FAIL: CPUID registers unavailable\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
/* Read ID_AA64PFR1_EL1 register */
|
||||
asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
|
||||
if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
|
||||
ksft_print_msg("FAIL: MTE features unavailable\n");
|
||||
return KSFT_SKIP;
|
||||
}
|
||||
/* Get current mte mode */
|
||||
ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
|
||||
if (ret < 0) {
|
||||
ksft_print_msg("FAIL:prctl PR_GET_TAGGED_ADDR_CTRL with error =%d\n", ret);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
if (ret & PR_MTE_TCF_SYNC)
|
||||
mte_cur_mode = MTE_SYNC_ERR;
|
||||
else if (ret & PR_MTE_TCF_ASYNC)
|
||||
mte_cur_mode = MTE_ASYNC_ERR;
|
||||
else if (ret & PR_MTE_TCF_NONE)
|
||||
mte_cur_mode = MTE_NONE_ERR;
|
||||
|
||||
mte_cur_pstate_tco = mte_get_pstate_tco();
|
||||
/* Disable PSTATE.TCO */
|
||||
mte_disable_pstate_tco();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mte_restore_setup(void)
|
||||
{
|
||||
mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
|
||||
if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
|
||||
mte_enable_pstate_tco();
|
||||
else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
|
||||
mte_disable_pstate_tco();
|
||||
}
|
||||
|
||||
/*
 * create_temp_file() - Create an anonymous tmpfs-backed file descriptor.
 *
 * The file is created in /dev/shm and unlinked immediately; the returned fd
 * keeps the inode alive until close.
 *
 * NOTE(review): returns 0 on failure, which is itself a valid fd number —
 * callers presumably test for 0; confirm before changing.
 */
int create_temp_file(void)
{
	char filename[] = "/dev/shm/tmp_XXXXXX";
	int fd;

	/* Create a file in the tmpfs filesystem */
	fd = mkstemp(filename);
	if (fd == -1) {
		ksft_print_msg("FAIL: Unable to open temporary file\n");
		return 0;
	}
	unlink(filename);
	return fd;
}
|
|
@ -0,0 +1,118 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 ARM Limited */

#ifndef _MTE_COMMON_UTIL_H
#define _MTE_COMMON_UTIL_H

#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include "mte_def.h"
#include "kselftest.h"

/* Allocation backends understood by the mte_allocate_* helpers. */
enum mte_mem_type {
	USE_MALLOC,		/* plain malloc() */
	USE_MMAP,		/* mmap() with PROT_MTE at map time */
	USE_MPROTECT,		/* mmap(), then mprotect() adds PROT_MTE */
};

/* Tag-check-fault reporting modes, mirroring the PR_MTE_TCF_* prctl modes. */
enum mte_mode {
	MTE_NONE_ERR,
	MTE_SYNC_ERR,
	MTE_ASYNC_ERR,
};

/* Expected-fault descriptor filled by mte_initialize_current_context() and
 * consulted by the signal handler to validate a caught tag fault.
 */
struct mte_fault_cxt {
	/* Address start which triggers mte tag fault */
	unsigned long trig_addr;
	/* Address range for mte tag fault and negative value means underflow */
	ssize_t trig_range;
	/* siginfo si code */
	unsigned long trig_si_code;
	/* Flag to denote if correct fault caught */
	bool fault_valid;
};

extern struct mte_fault_cxt cur_mte_cxt;

/* MTE utility functions */
void mte_default_handler(int signum, siginfo_t *si, void *uc);
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *));
void mte_wait_after_trig(void);
/* Allocators: return a (possibly tagged) payload pointer or NULL on failure;
 * free with the matching mte_free_memory* call using the same arguments.
 */
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
				    size_t range_before, size_t range_after);
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping,
			       bool tags, int fd);
void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
					 size_t range_before, size_t range_after, int fd);
void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags);
void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
			       size_t range_before, size_t range_after);
void *mte_insert_tags(void *ptr, size_t size);
void mte_clear_tags(void *ptr, size_t size);
int mte_default_setup(void);
void mte_restore_setup(void);
int mte_switch_mode(int mte_option, unsigned long incl_mask);
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range);

/* Common utility functions */
int create_temp_file(void);

/* Assembly MTE utility functions (implemented in mte_helper.S) */
void *mte_insert_random_tag(void *ptr);
void *mte_insert_new_tag(void *ptr);
void *mte_get_tag_address(void *ptr);
void mte_set_tag_address_range(void *ptr, int range);
void mte_clear_tag_address_range(void *ptr, int range);
void mte_disable_pstate_tco(void);
void mte_enable_pstate_tco(void);
unsigned int mte_get_pstate_tco(void);
/* Test framework static inline functions/macros */
|
||||
static inline void evaluate_test(int err, const char *msg)
|
||||
{
|
||||
if (err == KSFT_PASS)
|
||||
ksft_test_result_pass(msg);
|
||||
else if (err == KSFT_FAIL)
|
||||
ksft_test_result_fail(msg);
|
||||
}
|
||||
|
||||
static inline int check_allocated_memory(void *ptr, size_t size,
|
||||
int mem_type, bool tags)
|
||||
{
|
||||
if (ptr == NULL) {
|
||||
ksft_print_msg("FAIL: memory allocation\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
if (tags && !MT_FETCH_TAG((uintptr_t)ptr)) {
|
||||
ksft_print_msg("FAIL: tag not found at addr(%p)\n", ptr);
|
||||
mte_free_memory((void *)ptr, size, mem_type, false);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
static inline int check_allocated_memory_range(void *ptr, size_t size, int mem_type,
|
||||
size_t range_before, size_t range_after)
|
||||
{
|
||||
if (ptr == NULL) {
|
||||
ksft_print_msg("FAIL: memory allocation\n");
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
|
||||
if (!MT_FETCH_TAG((uintptr_t)ptr)) {
|
||||
ksft_print_msg("FAIL: tag not found at addr(%p)\n", ptr);
|
||||
mte_free_memory_tag_range((void *)ptr, size, mem_type, range_before,
|
||||
range_after);
|
||||
return KSFT_FAIL;
|
||||
}
|
||||
return KSFT_PASS;
|
||||
}
|
||||
|
||||
#endif /* _MTE_COMMON_UTIL_H */
|
|
@ -0,0 +1,60 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 ARM Limited */

/*
 * Below definitions may be found in kernel headers, However, they are
 * redefined here to decouple the MTE selftests compilations from them.
 */
#ifndef SEGV_MTEAERR
#define SEGV_MTEAERR	8
#endif
#ifndef SEGV_MTESERR
#define SEGV_MTESERR	9
#endif
#ifndef PROT_MTE
#define PROT_MTE	0x20
#endif
#ifndef HWCAP2_MTE
#define HWCAP2_MTE	(1 << 18)
#endif

#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT	1
#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE		(0UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TCF_ASYNC
#define PR_MTE_TCF_ASYNC	(2UL << PR_MTE_TCF_SHIFT)
#endif
#ifndef PR_MTE_TAG_SHIFT
#define PR_MTE_TAG_SHIFT	3
#endif

/* MTE Hardware feature definitions below. */
#define MT_TAG_SHIFT		56
#define MT_TAG_MASK		0xFUL
#define MT_FREE_TAG		0x0UL
#define MT_GRANULE_SIZE		16
#define MT_TAG_COUNT		16
#define MT_INCLUDE_TAG_MASK	0xFFFF
#define MT_EXCLUDE_TAG_MASK	0x0

#define MT_ALIGN_GRANULE	(MT_GRANULE_SIZE - 1)
/*
 * Macro arguments are fully parenthesized so that expressions such as
 * MT_ALIGN_UP(a | b) or MT_SET_TAG(p, t + 1) expand with the intended
 * precedence instead of being rebound by the operators in the body.
 */
#define MT_CLEAR_TAG(x)		((x) & ~(MT_TAG_MASK << MT_TAG_SHIFT))
#define MT_SET_TAG(x, y)	((x) | ((y) << MT_TAG_SHIFT))
#define MT_FETCH_TAG(x)		(((x) >> MT_TAG_SHIFT) & (MT_TAG_MASK))
#define MT_ALIGN_UP(x)		(((x) + MT_ALIGN_GRANULE) & ~(MT_ALIGN_GRANULE))

#define MT_PSTATE_TCO_SHIFT	25
#define MT_PSTATE_TCO_MASK	~(0x1 << MT_PSTATE_TCO_SHIFT)
#define MT_PSTATE_TCO_EN	1
#define MT_PSTATE_TCO_DIS	0

#define MT_EXCLUDE_TAG(x)		(1 << (x))
#define MT_INCLUDE_VALID_TAG(x)		(MT_INCLUDE_TAG_MASK ^ MT_EXCLUDE_TAG(x))
#define MT_INCLUDE_VALID_TAGS(x)	(MT_INCLUDE_TAG_MASK ^ (x))
#define MTE_ALLOW_NON_ZERO_TAG		MT_INCLUDE_VALID_TAG(0)
|
|
@ -0,0 +1,128 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 ARM Limited */

/*
 * Assembly helpers for the MTE selftests: thin wrappers around the ARMv8.5
 * MTE instructions (irg, gmi, ldg, stg, stzg) and the PSTATE.TCO field.
 */

#include "mte_def.h"

/* Minimal ENTRY/ENDPROC linkage macros (mirroring the kernel's style). */
#define ENTRY(name) \
	.globl name ;\
	.p2align 2;\
	.type name, @function ;\
name:

#define ENDPROC(name) \
	.size name, .-name ;

	.text
/*
 * mte_insert_random_tag: Insert random tag and might be same as the source tag if
 *	the source pointer has it.
 * Input:
 *	x0 - source pointer with a tag/no-tag
 * Return:
 *	x0 - pointer with random tag
 */
ENTRY(mte_insert_random_tag)
	irg	x0, x0, xzr
	ret
ENDPROC(mte_insert_random_tag)

/*
 * mte_insert_new_tag: Insert new tag and different from the source tag if
 *	source pointer has it.
 * Input:
 *	x0 - source pointer with a tag/no-tag
 * Return:
 *	x0 - pointer with random tag
 * Note: clobbers x1 (gmi builds the exclude mask from the current tag).
 */
ENTRY(mte_insert_new_tag)
	gmi	x1, x0, xzr
	irg	x0, x0, x1
	ret
ENDPROC(mte_insert_new_tag)

/*
 * mte_get_tag_address: Get the tag from given address.
 * Input:
 *	x0 - source pointer
 * Return:
 *	x0 - pointer with appended tag
 */
ENTRY(mte_get_tag_address)
	ldg	x0, [x0]
	ret
ENDPROC(mte_get_tag_address)

/*
 * mte_set_tag_address_range: Set the tag range from the given address
 * Input:
 *	x0 - source pointer with tag data
 *	x1 - range
 * Return:
 *	none
 * Note: the loop decrements by MT_GRANULE_SIZE and stops on zero, so x1 is
 * assumed to be a multiple of the granule size (callers align it first).
 */
ENTRY(mte_set_tag_address_range)
	cbz	x1, 2f
1:
	stg	x0, [x0, #0x0]
	add	x0, x0, #MT_GRANULE_SIZE
	sub	x1, x1, #MT_GRANULE_SIZE
	cbnz	x1, 1b
2:
	ret
ENDPROC(mte_set_tag_address_range)

/*
 * mte_clear_tag_address_range: Clear the tag range from the given address
 * Input:
 *	x0 - source pointer with tag data
 *	x1 - range
 * Return:
 *	none
 * Note: stzg also zeroes the data in each granule, and x1 is assumed to be
 * a multiple of the granule size (see mte_set_tag_address_range).
 */
ENTRY(mte_clear_tag_address_range)
	cbz	x1, 2f
1:
	stzg	x0, [x0, #0x0]
	add	x0, x0, #MT_GRANULE_SIZE
	sub	x1, x1, #MT_GRANULE_SIZE
	cbnz	x1, 1b
2:
	ret
ENDPROC(mte_clear_tag_address_range)

/*
 * mte_enable_pstate_tco: Enable PSTATE.TCO (tag check override) field
 * Input:
 *	none
 * Return:
 *	none
 */
ENTRY(mte_enable_pstate_tco)
	msr	tco, #MT_PSTATE_TCO_EN
	ret
ENDPROC(mte_enable_pstate_tco)

/*
 * mte_disable_pstate_tco: Disable PSTATE.TCO (tag check override) field
 * Input:
 *	none
 * Return:
 *	none
 */
ENTRY(mte_disable_pstate_tco)
	msr	tco, #MT_PSTATE_TCO_DIS
	ret
ENDPROC(mte_disable_pstate_tco)

/*
 * mte_get_pstate_tco: Get PSTATE.TCO (tag check override) field
 * Input:
 *	none
 * Return:
 *	x0 - 1 if TCO is set, 0 otherwise
 */
ENTRY(mte_get_pstate_tco)
	mrs	x0, tco
	ubfx	x0, x0, #MT_PSTATE_TCO_SHIFT, #1
	ret
ENDPROC(mte_get_pstate_tco)
|
Loading…
Reference in New Issue