x86/entry: Move nmi entry/exit into common code
Lockdep state handling on NMI enter and exit is nothing specific to x86; it is
no different on other architectures. The extra state type is also unnecessary:
irqentry_state_t can carry the required information as well.

Move it to common code and extend irqentry_state_t to carry lockdep state.

[ Ira: Make exit_rcu and lockdep a union as they are mutually exclusive
  between the IRQ and NMI exceptions, and add kernel documentation for
  struct irqentry_state_t ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201102205320.1458656-7-ira.weiny@intel.com
parent 01be83eea0
commit b6be002bcd
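For orientation before the diff: a minimal sketch of the caller pattern this
change establishes, distilled from the exc_nmi hunk below (a sketch, not
additional code from the commit; the elided early checks stand in for the
SEV-ES and crashing-CPU handling):

	/*
	 * Sketch of the common caller pattern: an NMI-like entry point gets
	 * an opaque irqentry_state_t from irqentry_nmi_enter() and must pass
	 * that same object back to irqentry_nmi_exit().
	 */
	DEFINE_IDTENTRY_RAW(exc_nmi)
	{
		irqentry_state_t irq_state;

		/* ... early checks elided ... */

		irq_state = irqentry_nmi_enter(regs);

		if (!ignore_nmis)
			default_do_nmi(regs);

		irqentry_nmi_exit(regs, irq_state);
	}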
arch/x86/entry/common.c
@@ -209,40 +209,6 @@ SYSCALL_DEFINE0(ni_syscall)
 	return -ENOSYS;
 }
 
-noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
-{
-	bool irq_state = lockdep_hardirqs_enabled();
-
-	__nmi_enter();
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	lockdep_hardirq_enter();
-	rcu_nmi_enter();
-
-	instrumentation_begin();
-	trace_hardirqs_off_finish();
-	ftrace_nmi_enter();
-	instrumentation_end();
-
-	return irq_state;
-}
-
-noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
-{
-	instrumentation_begin();
-	ftrace_nmi_exit();
-	if (restore) {
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	}
-	instrumentation_end();
-
-	rcu_nmi_exit();
-	lockdep_hardirq_exit();
-	if (restore)
-		lockdep_hardirqs_on(CALLER_ADDR0);
-	__nmi_exit();
-}
-
 #ifdef CONFIG_XEN_PV
 #ifndef CONFIG_PREEMPTION
 /*
arch/x86/include/asm/idtentry.h
@@ -11,9 +11,6 @@
 
 #include <asm/irq_stack.h>
 
-bool idtentry_enter_nmi(struct pt_regs *regs);
-void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
-
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *		      No error code pushed by hardware
arch/x86/kernel/cpu/mce/core.c
@@ -1983,7 +1983,7 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
 
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
-	bool irq_state;
+	irqentry_state_t irq_state;
 
 	WARN_ON_ONCE(user_mode(regs));
 
@@ -1995,7 +1995,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 	    mce_check_crashing_cpu())
 		return;
 
-	irq_state = idtentry_enter_nmi(regs);
+	irq_state = irqentry_nmi_enter(regs);
 	/*
 	 * The call targets are marked noinstr, but objtool can't figure
 	 * that out because it's an indirect call. Annotate it.
@@ -2006,7 +2006,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 	if (regs->flags & X86_EFLAGS_IF)
 		trace_hardirqs_on_prepare();
 	instrumentation_end();
-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);
 }
 
 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
arch/x86/kernel/nmi.c
@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-	bool irq_state;
+	irqentry_state_t irq_state;
 
 	/*
 	 * Re-enable NMIs right here when running as an SEV-ES guest. This might
@@ -502,14 +502,14 @@ nmi_restart:
 
 	this_cpu_write(nmi_dr7, local_db_save());
 
-	irq_state = idtentry_enter_nmi(regs);
+	irq_state = irqentry_nmi_enter(regs);
 
 	inc_irq_stat(__nmi_count);
 
 	if (!ignore_nmis)
 		default_do_nmi(regs);
 
-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);
 
 	local_db_restore(this_cpu_read(nmi_dr7));
 
arch/x86/kernel/traps.c
@@ -405,7 +405,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
 	}
 #endif
 
-	idtentry_enter_nmi(regs);
+	irqentry_nmi_enter(regs);
 	instrumentation_begin();
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
@@ -651,12 +651,13 @@ DEFINE_IDTENTRY_RAW(exc_int3)
 		instrumentation_end();
 		irqentry_exit_to_user_mode(regs);
 	} else {
-		bool irq_state = idtentry_enter_nmi(regs);
+		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
+
 		instrumentation_begin();
 		if (!do_int3(regs))
 			die("int3", regs, 0);
 		instrumentation_end();
-		idtentry_exit_nmi(regs, irq_state);
+		irqentry_nmi_exit(regs, irq_state);
 	}
 }
 
@@ -851,7 +852,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 	 * includes the entry stack is excluded for everything.
 	 */
 	unsigned long dr7 = local_db_save();
-	bool irq_state = idtentry_enter_nmi(regs);
+	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 	instrumentation_begin();
 
 	/*
@@ -908,7 +909,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 		regs->flags &= ~X86_EFLAGS_TF;
 out:
 	instrumentation_end();
-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);
 
 	local_db_restore(dr7);
 }
@@ -926,7 +927,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
 
 	/*
 	 * NB: We can't easily clear DR7 here because
-	 * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
 	 * user memory, etc. This means that a recursive #DB is possible. If
 	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
 	 * Since we're not on the IST stack right now, everything will be
include/linux/entry-common.h
@@ -346,8 +346,26 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
 void irqentry_exit_to_user_mode(struct pt_regs *regs);
 
 #ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ *	      exit path has to invoke rcu_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ *	     lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
 typedef struct irqentry_state {
-	bool	exit_rcu;
+	union {
+		bool	exit_rcu;
+		bool	lockdep;
+	};
 } irqentry_state_t;
 #endif
 
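The #ifndef irqentry_state guard above leaves room for an architecture to
supply its own definition of the type. A hypothetical override, shown only to
illustrate the hook (the arch_flags member and the override itself are not
part of this commit):

	/*
	 * Hypothetical arch-provided irqentry_state (illustration only): an
	 * override must keep the members the generic enter/exit code writes,
	 * but may append its own state. Defining the irqentry_state macro
	 * suppresses the generic typedef.
	 */
	typedef struct irqentry_state {
		union {
			bool	exit_rcu;
			bool	lockdep;
		};
		unsigned long	arch_flags;	/* hypothetical extra state */
	} irqentry_state_t;
	#define irqentry_state irqentry_state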
@@ -407,4 +425,23 @@ void irqentry_exit_cond_resched(void);
  */
 void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
 
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs:	Pointer to pt_regs (NMI entry regs)
+ * @irq_state:	Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
 #endif
kernel/entry/common.c
@@ -405,3 +405,39 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 		rcu_irq_exit();
 	}
 }
+
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
+{
+	irqentry_state_t irq_state;
+
+	irq_state.lockdep = lockdep_hardirqs_enabled();
+
+	__nmi_enter();
+	lockdep_hardirqs_off(CALLER_ADDR0);
+	lockdep_hardirq_enter();
+	rcu_nmi_enter();
+
+	instrumentation_begin();
+	trace_hardirqs_off_finish();
+	ftrace_nmi_enter();
+	instrumentation_end();
+
+	return irq_state;
+}
+
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
+{
+	instrumentation_begin();
+	ftrace_nmi_exit();
+	if (irq_state.lockdep) {
+		trace_hardirqs_on_prepare();
+		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	}
+	instrumentation_end();
+
+	rcu_nmi_exit();
+	lockdep_hardirq_exit();
+	if (irq_state.lockdep)
+		lockdep_hardirqs_on(CALLER_ADDR0);
+	__nmi_exit();
+}
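A closing note on the union introduced in entry-common.h: it keeps
irqentry_state_t the size of a single bool because the two members are never
live at the same time; irqentry_enter()/irqentry_exit() use only exit_rcu,
and irqentry_nmi_enter()/irqentry_nmi_exit() use only lockdep. A standalone
userspace toy of that layout (illustration only, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Userspace model of the kernel's irqentry_state_t: exit_rcu and
	 * lockdep alias the same storage because the IRQ and NMI enter/exit
	 * pairs are mutually exclusive users.
	 */
	typedef struct irqentry_state {
		union {
			bool exit_rcu;
			bool lockdep;
		};
	} irqentry_state_t;

	int main(void)
	{
		irqentry_state_t s = { .lockdep = true };

		/* The union adds no size over a single bool member. */
		printf("sizeof(irqentry_state_t) = %zu\n",
		       sizeof(irqentry_state_t));
		printf("s.lockdep = %d\n", s.lockdep);
		return 0;
	}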