kallsyms, x86: Export addresses of PTI entry trampolines

Currently, the addresses of PTI entry trampolines are not exported to
user space. Kernel profiling tools need these addresses to identify the
kernel code, so add a symbol and address for each CPU's PTI entry
trampoline.
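
For example, with this change in place a /proc/kallsyms reader sees one
trampoline symbol per possible CPU (illustrative output; the addresses
are hypothetical, and non-privileged readers may see zeroes depending
on kptr_restrict):

  $ grep __entry_SYSCALL_64_trampoline /proc/kallsyms
  fffffe0000006000 t __entry_SYSCALL_64_trampoline
  fffffe0000032000 t __entry_SYSCALL_64_trampoline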

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/1528289651-4113-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/kallsyms.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
@@ -150,6 +151,28 @@ static void __init setup_cpu_entry_area(int cpu)
 	percpu_setup_debug_store(cpu);
 }
 
+#ifdef CONFIG_X86_64
+int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+		     char *name)
+{
+	unsigned int cpu, ncpu = 0;
+
+	if (symnum >= num_possible_cpus())
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (ncpu++ >= symnum)
+			break;
+	}
+
+	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
+	*type = 't';
+	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);
+
+	return 0;
+}
+#endif
+
 static __init void setup_cpu_entry_area_ptes(void)
 {
 #ifdef CONFIG_X86_32

diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -432,6 +432,7 @@ int sprint_backtrace(char *buffer, unsigned long address)
 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
 struct kallsym_iter {
 	loff_t pos;
+	loff_t pos_arch_end;
 	loff_t pos_mod_end;
 	loff_t pos_ftrace_mod_end;
 	unsigned long value;
@@ -443,9 +444,29 @@ struct kallsym_iter {
 	int show_value;
 };
 
+int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
+			    char *type, char *name)
+{
+	return -EINVAL;
+}
+
+static int get_ksymbol_arch(struct kallsym_iter *iter)
+{
+	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
+				   &iter->value, &iter->type,
+				   iter->name);
+
+	if (ret < 0) {
+		iter->pos_arch_end = iter->pos;
+		return 0;
+	}
+
+	return 1;
+}
+
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
-	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
+	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
 				     &iter->value, &iter->type,
 				     iter->name, iter->module_name,
 				     &iter->exported);
@@ -501,6 +522,7 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 	iter->nameoff = get_symbol_offset(new_pos);
 	iter->pos = new_pos;
 	if (new_pos == 0) {
+		iter->pos_arch_end = 0;
 		iter->pos_mod_end = 0;
 		iter->pos_ftrace_mod_end = 0;
 	}
@@ -515,6 +537,10 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 {
 	iter->pos = pos;
 
+	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
+	    get_ksymbol_arch(iter))
+		return 1;
+
 	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
 	    get_ksymbol_mod(iter))
 		return 1;
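
Taken together, update_iter_mod() now walks the arch-provided symbols
first once the built-in symbol table is exhausted, recording where they
end in pos_arch_end so that module symbols are indexed relative to that
offset. From user space the net effect is simply extra lines in
/proc/kallsyms. A minimal sketch of a consumer follows (not part of the
patch; the line format is "address type name [module]", and addresses
may read as zero without privilege, depending on kptr_restrict):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/kallsyms");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned long long addr;
		char type;
		char name[128];

		/* Each line is: <hex address> <type char> <symbol name> */
		if (sscanf(line, "%llx %c %127s", &addr, &type, name) != 3)
			continue;

		/* One of these appears per possible CPU after this patch. */
		if (!strcmp(name, "__entry_SYSCALL_64_trampoline"))
			printf("PTI trampoline at 0x%llx (type %c)\n",
			       addr, type);
	}

	fclose(f);
	return 0;
}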