x86/apic: Provide and use helper for send_IPI_allbutself()
To support IPI shorthands, wrap invocations of apic->send_IPI_allbutself()
in a helper function so that the static key controlling the shorthand mode
is checked in only one place. Fix up all callers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190722105220.492691679@linutronix.de
commit 22ca7ee933 (parent 6a1cb5f5c6)
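The shape of the change is easiest to see outside the kernel. Below is a minimal user-space C sketch of the pattern the commit message describes: one helper owns the mode decision, so every call site shrinks to a single call and never tests the mode itself. The names in the sketch (use_shorthand, send_ipi_allbutself() and the two backends) are illustrative stand-ins for the static key, apic_send_IPI_allbutself() and the two apic callbacks; they are not kernel symbols.

/*
 * Minimal user-space analogue of the helper pattern (not kernel code).
 * The mode flag is consulted in exactly one place; callers only ever
 * call send_ipi_allbutself().
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_shorthand;      /* stands in for the static key */

static void send_shorthand(int vector)
{
        printf("shorthand IPI, vector %d\n", vector);
}

static void send_per_cpu_mask(int vector)
{
        printf("per-CPU mask IPI, vector %d\n", vector);
}

/* The single place where the delivery mode is decided. */
static void send_ipi_allbutself(int vector)
{
        if (use_shorthand)
                send_shorthand(vector);
        else
                send_per_cpu_mask(vector);
}

int main(void)
{
        send_ipi_allbutself(2);         /* mask path */
        use_shorthand = true;
        send_ipi_allbutself(2);         /* shorthand path */
        return 0;
}

Switching the mode later only requires flipping the flag; no call site has to change, which is exactly what centralizing the static key buys the kernel code below.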
@@ -177,6 +177,8 @@ extern void lapic_online(void);
 extern void lapic_offline(void);
 extern bool apic_needs_pit(void);
 
+extern void apic_send_IPI_allbutself(unsigned int vector);
+
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok 1
@@ -50,6 +50,18 @@ void apic_smt_update(void)
                static_branch_enable(&apic_use_ipi_shorthand);
        }
 }
+
+void apic_send_IPI_allbutself(unsigned int vector)
+{
+       if (num_online_cpus() < 2)
+               return;
+
+       if (static_branch_likely(&apic_use_ipi_shorthand))
+               apic->send_IPI_allbutself(vector);
+       else
+               apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+}
+
 #endif /* CONFIG_SMP */
 
 static inline int __prepare_ICR2(unsigned int mask)
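With the helper above in place, a call site reduces to a single line and also inherits the num_online_cpus() < 2 early return, so it needs no "are there other CPUs?" guard of its own. A hypothetical caller, sketched only for illustration (the function name below is made up and not part of this commit), would read roughly like this:

/* Hypothetical caller sketch -- not part of this commit. */
static void kick_all_other_cpus(void)
{
        /*
         * No-op when this is the only online CPU; otherwise the helper
         * picks the shorthand or the online-mask fallback, as selected
         * by the static key in one place.
         */
        apic_send_IPI_allbutself(NMI_VECTOR);
}

The callers converted in the hunks below follow exactly this shape.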
@@ -416,7 +416,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
  */
 void kgdb_roundup_cpus(void)
 {
-       apic->send_IPI_allbutself(NMI_VECTOR);
+       apic_send_IPI_allbutself(NMI_VECTOR);
 }
 #endif
 
@@ -828,11 +828,6 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
        return NMI_HANDLED;
 }
 
-static void smp_send_nmi_allbutself(void)
-{
-       apic->send_IPI_allbutself(NMI_VECTOR);
-}
-
 /*
  * Halt all other CPUs, calling the specified function on each of them
  *
@@ -861,7 +856,7 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
         */
        wmb();
 
-       smp_send_nmi_allbutself();
+       apic_send_IPI_allbutself(NMI_VECTOR);
 
        /* Kick CPUs looping in NMI context. */
        WRITE_ONCE(crash_ipi_issued, 1);
@@ -215,7 +215,7 @@ static void native_stop_other_cpus(int wait)
                /* sync above data before sending IRQ */
                wmb();
 
-               apic->send_IPI_allbutself(REBOOT_VECTOR);
+               apic_send_IPI_allbutself(REBOOT_VECTOR);
 
                /*
                 * Don't wait longer than a second for IPI completion. The
@@ -241,7 +241,7 @@ static void native_stop_other_cpus(int wait)
 
                        pr_emerg("Shutting down cpus with NMI\n");
 
-                       apic->send_IPI_allbutself(NMI_VECTOR);
+                       apic_send_IPI_allbutself(NMI_VECTOR);
                }
                /*
                 * Don't wait longer than 10 ms if the caller didn't