irq_work: Do not raise an IPI when queueing work on the local CPU
The QEMU PowerPC/PSeries machine model was not expecting a self-IPI, and it may be a surprising thing to do in any case, so have irq_work_queue_on() do local queueing when the target is the current CPU.

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Cédric Le Goater <clg@kaod.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190409093403.20994-1-npiggin@gmail.com
[ Simplified the preprocessor comments. Fixed unbalanced curly brackets pointed out by Thomas. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 471ba0e686
parent 2d65c42b43
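For context, a minimal caller sketch of the behaviour this commit changes: queueing irq_work onto the current CPU through irq_work_queue_on() now takes the local path (__irq_work_queue_local()) instead of sending a self-IPI. This is only an illustration, not part of the patch; demo_fn, demo_work and demo_queue_local are hypothetical names, while init_irq_work(), irq_work_queue_on() and get_cpu()/put_cpu() are existing kernel APIs.

#include <linux/irq_work.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical callback; runs from the irq_work interrupt context. */
static void demo_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work demo_work;

static void demo_queue_local(void)
{
	int cpu;

	init_irq_work(&demo_work, demo_fn);

	cpu = get_cpu();	/* disable preemption, get current CPU id */
	/*
	 * Here cpu == smp_processor_id(), so after this commit the call
	 * queues locally via __irq_work_queue_local() rather than raising
	 * a self-IPI through arch_send_call_function_single_ipi().
	 */
	irq_work_queue_on(&demo_work, cpu);
	put_cpu();
}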
@@ -56,34 +56,18 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
-
-	/* Only queue if not already pending */
-	if (!irq_work_claim(work))
-		return false;
-
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
-
-	return true;
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
@@ -95,23 +79,48 @@ bool irq_work_queue(struct irq_work *work)
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
-	}
-
+	__irq_work_queue_local(work);
 	preempt_enable();
 
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
+{
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return false;
+
+	preempt_disable();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
+	} else {
+		__irq_work_queue_local(work);
+	}
+	preempt_enable();
+
+	return true;
+#endif /* CONFIG_SMP */
+}
+
+
 bool irq_work_needs_cpu(void)
 {
 	struct llist_head *raised, *lazy;