context_tracking: Ensure that the critical path cannot be instrumented

context tracking lacks a few protection mechanisms against instrumentation:

 - While the core functions are marked NOKPROBE they lack protection
   against function tracing which is required as the function entry/exit
   points can be utilized by BPF.

 - static functions invoked from the protected functions need to be marked
   as well as they can be instrumented otherwise.

 - using plain inline allows the compiler to emit traceable and probable
   functions.

Fix this by marking the functions noinstr and converting the plain inlines
to __always_inline.

The NOKPROBE_SYMBOL() annotations are removed as the .noinstr.text section
is already excluded from being probed.

Cures the following objtool warnings:

 vmlinux.o: warning: objtool: enter_from_user_mode()+0x34: call to __context_tracking_exit() leaves .noinstr.text section
 vmlinux.o: warning: objtool: prepare_exit_to_usermode()+0x29: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: syscall_return_slowpath()+0x29: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_syscall_64()+0x7f: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_int80_syscall_32()+0x3d: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_fast_syscall_32()+0x9c: call to __context_tracking_enter() leaves .noinstr.text section

and generates new ones...

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200505134340.811520478@linutronix.de
This commit is contained in:
Thomas Gleixner 2020-03-04 11:05:22 +01:00
parent 20355e5f73
commit 0372007f5a
3 changed files with 14 additions and 12 deletions

View File

@@ -33,13 +33,13 @@ static inline void user_exit(void)
} }
/* Called with interrupts disabled. */ /* Called with interrupts disabled. */
static inline void user_enter_irqoff(void) static __always_inline void user_enter_irqoff(void)
{ {
if (context_tracking_enabled()) if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_USER); __context_tracking_enter(CONTEXT_USER);
} }
static inline void user_exit_irqoff(void) static __always_inline void user_exit_irqoff(void)
{ {
if (context_tracking_enabled()) if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_USER); __context_tracking_exit(CONTEXT_USER);
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
* is enabled. If context tracking is disabled, returns * is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging. * CONTEXT_DISABLED. This should be used primarily for debugging.
*/ */
static inline enum ctx_state ct_state(void) static __always_inline enum ctx_state ct_state(void)
{ {
return context_tracking_enabled() ? return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;

View File

@@ -26,12 +26,12 @@ struct context_tracking {
extern struct static_key_false context_tracking_key; extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking); DECLARE_PER_CPU(struct context_tracking, context_tracking);
static inline bool context_tracking_enabled(void) static __always_inline bool context_tracking_enabled(void)
{ {
return static_branch_unlikely(&context_tracking_key); return static_branch_unlikely(&context_tracking_key);
} }
static inline bool context_tracking_enabled_cpu(int cpu) static __always_inline bool context_tracking_enabled_cpu(int cpu)
{ {
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
} }
@@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active); return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
} }
static inline bool context_tracking_in_user(void) static __always_inline bool context_tracking_in_user(void)
{ {
return __this_cpu_read(context_tracking.state) == CONTEXT_USER; return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
} }

View File

@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(context_tracking_key);
DEFINE_PER_CPU(struct context_tracking, context_tracking); DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking); EXPORT_SYMBOL_GPL(context_tracking);
static bool context_tracking_recursion_enter(void) static noinstr bool context_tracking_recursion_enter(void)
{ {
int recursion; int recursion;
@@ -45,7 +45,7 @@ static bool context_tracking_recursion_enter(void)
return false; return false;
} }
static void context_tracking_recursion_exit(void) static __always_inline void context_tracking_recursion_exit(void)
{ {
__this_cpu_dec(context_tracking.recursion); __this_cpu_dec(context_tracking.recursion);
} }
@@ -59,7 +59,7 @@ static void context_tracking_recursion_exit(void)
* instructions to execute won't use any RCU read side critical section * instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state. * because this function sets RCU in extended quiescent state.
*/ */
void __context_tracking_enter(enum ctx_state state) void noinstr __context_tracking_enter(enum ctx_state state)
{ {
/* Kernel threads aren't supposed to go to userspace */ /* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm); WARN_ON_ONCE(!current->mm);
@@ -77,8 +77,10 @@ void __context_tracking_enter(enum ctx_state state)
* on the tick. * on the tick.
*/ */
if (state == CONTEXT_USER) { if (state == CONTEXT_USER) {
instrumentation_begin();
trace_user_enter(0); trace_user_enter(0);
vtime_user_enter(current); vtime_user_enter(current);
instrumentation_end();
} }
rcu_user_enter(); rcu_user_enter();
} }
@@ -99,7 +101,6 @@ void __context_tracking_enter(enum ctx_state state)
} }
context_tracking_recursion_exit(); context_tracking_recursion_exit();
} }
NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter); EXPORT_SYMBOL_GPL(__context_tracking_enter);
void context_tracking_enter(enum ctx_state state) void context_tracking_enter(enum ctx_state state)
@@ -142,7 +143,7 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
* This call supports re-entrancy. This way it can be called from any exception * This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not. * handler without needing to know if we came from userspace or not.
*/ */
void __context_tracking_exit(enum ctx_state state) void noinstr __context_tracking_exit(enum ctx_state state)
{ {
if (!context_tracking_recursion_enter()) if (!context_tracking_recursion_enter())
return; return;
@@ -155,15 +156,16 @@ void __context_tracking_exit(enum ctx_state state)
*/ */
rcu_user_exit(); rcu_user_exit();
if (state == CONTEXT_USER) { if (state == CONTEXT_USER) {
instrumentation_begin();
vtime_user_exit(current); vtime_user_exit(current);
trace_user_exit(0); trace_user_exit(0);
instrumentation_end();
} }
} }
__this_cpu_write(context_tracking.state, CONTEXT_KERNEL); __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
} }
context_tracking_recursion_exit(); context_tracking_recursion_exit();
} }
NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit); EXPORT_SYMBOL_GPL(__context_tracking_exit);
void context_tracking_exit(enum ctx_state state) void context_tracking_exit(enum ctx_state state)