genirq: Change force_irqthreads to a static key
With CONFIG_IRQ_FORCED_THREADING=y, testing the boolean force_irqthreads
could incur a cache line miss in invoke_softirq() and other places.

Replace the test with a static key to avoid the potential cache miss.

[ tglx: Dropped the IDE part, removed the export and updated blk-mq ]

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Tanner Love <tannerlove@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210602180338.3324213-1-tannerlove.kernel@gmail.com
commit 91cc470e79
parent 5a6c76b5de
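For context, here is a minimal standalone sketch of the static-key pattern this change adopts (hypothetical key and function names, not part of the commit): the key is defined false, flipped at most once during early boot, and every static_branch_unlikely() site is patched in place, so the hot path executes a no-op instead of loading a flag from a possibly cold cache line.

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Key defaults to false; testing it needs no memory load at runtime. */
DEFINE_STATIC_KEY_FALSE(example_feature_key);

static int __init setup_example_feature(char *arg)
{
	/* Enabling the key patches every branch site in the kernel text. */
	static_branch_enable(&example_feature_key);
	return 0;
}
early_param("example_feature", setup_example_feature);

static void example_hot_path(void)
{
	/* Compiles to straight-line code (a NOP) until the key is enabled. */
	if (static_branch_unlikely(&example_feature_key))
		pr_info("example feature path taken\n");
}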
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -606,7 +606,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	 * This is probably worse than completing the request on a different
 	 * cache domain.
 	 */
-	if (force_irqthreads)
+	if (force_irqthreads())
 		return false;
 
 	/* same CPU or cache domain? Complete locally */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -13,6 +13,7 @@
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
+#include <linux/jump_label.h>
 
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
@@ -474,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
 # ifdef CONFIG_PREEMPT_RT
-# define force_irqthreads	(true)
+# define force_irqthreads()	(true)
 # else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
 # endif
 #else
-#define force_irqthreads	(0)
+#define force_irqthreads()	(false)
 #endif
 
 #ifndef local_softirq_pending
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -25,12 +25,11 @@
 #include "internals.h"
 
 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
-__read_mostly bool force_irqthreads;
-EXPORT_SYMBOL_GPL(force_irqthreads);
+DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
 
 static int __init setup_forced_irqthreads(char *arg)
 {
-	force_irqthreads = true;
+	static_branch_enable(&force_irqthreads_key);
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
@@ -1260,8 +1259,8 @@ static int irq_thread(void *data)
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
 
-	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
-					&action->thread_flags))
+	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
+					   &action->thread_flags))
 		handler_fn = irq_forced_thread_fn;
 	else
 		handler_fn = irq_thread_fn;
@@ -1322,7 +1321,7 @@ EXPORT_SYMBOL_GPL(irq_wake_thread);
 
 static int irq_setup_forced_threading(struct irqaction *new)
 {
-	if (!force_irqthreads)
+	if (!force_irqthreads())
 		return 0;
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -422,7 +422,7 @@ static inline void invoke_softirq(void)
 	if (ksoftirqd_running(local_softirq_pending()))
 		return;
 
-	if (!force_irqthreads || !__this_cpu_read(ksoftirqd)) {
+	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if