ARM: 8824/1: fix a migrating irq bug when hotplug cpu
Arm TC2 fails the cpu hotplug stress test.
This issue was tracked down to a missing copy of the new affinity
cpumask for the vexpress-spc interrupt into struct
irq_common_data.affinity when the interrupt is migrated in
migrate_one_irq().
Fix it by replacing the arm-specific hotplug cpu irq migration with the
generic irq migration code.
This is the counterpart implementation to commit 217d453d47 ("arm64:
fix a migrating irq bug when hotplug cpu").
Tested with cpu hotplug stress test on Arm TC2 (multi_v7_defconfig plus
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y and CONFIG_ARM_VEXPRESS_SPC_CPUFREQ=y).
The vexpress-spc interrupt (irq=22) on this board is affine to CPU0.
Its affinity cpumask now changes correctly, e.g. from 0 to 1-4, when
CPU0 is hotplugged out.
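
For reference, the generic code does this affinity-mask bookkeeping
centrally: irq_do_set_affinity() in kernel/irq/manage.c copies the newly
chosen cpumask into irq_common_data.affinity once the irqchip accepts it,
i.e. the update described above as missing for the vexpress-spc interrupt.
A minimal sketch of that step (abridged from memory, not the verbatim
kernel source; 'chip', 'data', 'desc', 'mask' named as in the generic code):

        /* Sketch only: core bookkeeping in irq_do_set_affinity(). */
        ret = chip->irq_set_affinity(data, mask, force);
        if (ret == IRQ_SET_MASK_OK || ret == IRQ_SET_MASK_OK_DONE)
                /* Record the accepted mask in irq_common_data.affinity. */
                cpumask_copy(desc->irq_common_data.affinity, mask);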
Suggested-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
 config HOTPLUG_CPU
         bool "Support for hot-pluggable CPUs"
         depends on SMP
+        select GENERIC_IRQ_MIGRATION
         help
           Say Y here to experiment with turning CPUs off and on. CPUs
           can be controlled through /sys/devices/system/cpu.

--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);

--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
         return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-        struct irq_data *d = irq_desc_get_irq_data(desc);
-        const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-        struct irq_chip *c;
-        bool ret = false;
-
-        /*
-         * If this is a per-CPU interrupt, or the affinity does not
-         * include this CPU, then we have nothing to do.
-         */
-        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-                return false;
-
-        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-                affinity = cpu_online_mask;
-                ret = true;
-        }
-
-        c = irq_data_get_irq_chip(d);
-        if (!c->irq_set_affinity)
-                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-        else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-                cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-        return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-        unsigned int i;
-        struct irq_desc *desc;
-        unsigned long flags;
-
-        local_irq_save(flags);
-
-        for_each_irq_desc(i, desc) {
-                bool affinity_broken;
-
-                raw_spin_lock(&desc->lock);
-                affinity_broken = migrate_one_irq(desc);
-                raw_spin_unlock(&desc->lock);
-
-                if (affinity_broken)
-                        pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-                                            i, smp_processor_id());
-        }
-
-        local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */

--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -254,7 +254,7 @@ int __cpu_disable(void)
         /*
          * OK - migrate IRQs away from this CPU
          */
-        migrate_irqs();
+        irq_migrate_all_off_this_cpu();
 
         /*
          * Flush user cache and TLB mappings, and then remove this CPU