diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index f6d2946edbd2..15f2effd6baf 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -60,7 +60,7 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
+	irq_data_update_affinity(data, cpumask_of(cpu));
 	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 75cccbd3f05f..7b3440687176 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -40,7 +40,7 @@ config ARCH_HIP04
 	select HAVE_ARM_ARCH_TIMER
 	select MCPM if SMP
 	select MCPM_QUAD_CLUSTER if SMP
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 	help
 	  Support for Hisilicon HiP04 SoC family
 
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 35adcf89035a..99300850abc1 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -834,7 +834,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpumask_setall(irq_get_affinity_mask(irq));
+		irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ecef17c7c35b..275b9ea58c64 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,8 +57,8 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(irq_get_affinity_mask(irq),
-			     cpumask_of(cpu_logical_id(hwid)));
+		irq_data_update_affinity(irq_get_irq_data(irq),
+					 cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index df5c28f252e3..025e5133c860 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -37,7 +37,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 	msg.data = data;
 
 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
+	irq_data_update_affinity(idata, cpumask_of(cpu));
 
 	return 0;
 }
@@ -132,7 +132,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	cpumask_copy(irq_data_get_affinity_mask(data), mask);
+	irq_data_update_affinity(data, mask);
 
 	return 0;
 }
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6cdcbf4de763..9cb9ed44bcaf 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -263,7 +263,7 @@ static int next_cpu_for_irq(struct irq_data *data)
 
 #ifdef CONFIG_SMP
 	int cpu;
-	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	const struct cpumask *mask = irq_data_get_affinity_mask(data);
 	int weight = cpumask_weight(mask);
 	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
@@ -758,7 +758,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 {
 	int cpu = smp_processor_id();
 	cpumask_t new_affinity;
-	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	const struct cpumask *mask = irq_data_get_affinity_mask(data);
 
 	if (!cpumask_test_cpu(cpu, mask))
 		return;
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 0fe2d79fb123..5ebb1771b4ab 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -315,7 +315,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
 	struct irq_data *d = irq_get_irq_data(irq);
-	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
+	irq_data_update_affinity(d, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index ef0f0827cf57..56269c2c3414 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -230,16 +230,17 @@ void migrate_irqs(void)
 		struct irq_data *data = irq_get_irq_data(irq);
 
 		if (irq_data_get_node(data) == cpu) {
-			struct cpumask *mask = irq_data_get_affinity_mask(data);
+			const struct cpumask *mask = irq_data_get_affinity_mask(data);
 			unsigned int newcpu = cpumask_any_and(mask,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
 				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 						    irq, cpu);
 
-				cpumask_setall(mask);
+				irq_set_affinity(irq, cpu_all_mask);
+			} else {
+				irq_set_affinity(irq, mask);
 			}
-			irq_set_affinity(irq, mask);
 		}
 	}
 }
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 7e0f6bedc248..42c70d28ef27 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -192,7 +192,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct pci_dev *dev;
 	struct hv_interrupt_entry out_entry, *stored_entry;
 	struct irq_cfg *cfg = irqd_cfg(data);
-	cpumask_t *affinity;
+	const cpumask_t *affinity;
 	int cpu;
 	u64 status;
 
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 529fe9245821..42f106004400 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -169,7 +169,7 @@ void migrate_irqs(void)
 
 	for_each_active_irq(i) {
 		struct irq_data *data = irq_get_irq_data(i);
-		struct cpumask *mask;
+		const struct cpumask *mask;
 		unsigned int newcpu;
 
 		if (irqd_is_per_cpu(data))
@@ -185,9 +185,10 @@ void migrate_irqs(void)
 			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 					    i, cpu);
 
-			cpumask_setall(mask);
+			irq_set_affinity(i, cpu_all_mask);
+		} else {
+			irq_set_affinity(i, mask);
 		}
-		irq_set_affinity(i, mask);
 	}
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index e285a220c913..51bd66a45a11 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -194,7 +194,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 	u32 vector;
 	struct irq_cfg *cfg;
 	int ioapic_id;
-	struct cpumask *affinity;
+	const struct cpumask *affinity;
 	int cpu;
 	struct hv_interrupt_entry entry;
 	struct hyperv_root_ir_data *data = irq_data->chip_data;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 1f23a6be7d88..462adac905a6 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config IRQCHIP
 config ARM_GIC
 	bool
 	select IRQ_DOMAIN_HIERARCHY
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ARM_GIC_PM
 	bool
@@ -34,7 +34,7 @@ config ARM_GIC_V3
 	bool
 	select IRQ_DOMAIN_HIERARCHY
 	select PARTITION_PERCPU
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ARM_GIC_V3_ITS
 	bool
@@ -76,7 +76,7 @@ config ARMADA_370_XP_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select PCI_MSI if PCI
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config ALPINE_MSI
 	bool
@@ -112,7 +112,7 @@ config BCM6345_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config BCM7038_L1_IRQ
 	tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
@@ -120,7 +120,7 @@ config BCM7038_L1_IRQ
 	default ARCH_BRCMSTB || BMIPS_GENERIC
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config BCM7120_L2_IRQ
 	tristate "Broadcom STB 7120-style L2 interrupt controller driver"
@@ -177,9 +177,9 @@ config MADERA_IRQ
 config IRQ_MIPS_CPU
 	bool
 	select GENERIC_IRQ_CHIP
-	select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
+	select GENERIC_IRQ_IPI if SMP && SYS_SUPPORTS_MULTITHREADING
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config CLPS711X_IRQCHIP
 	bool
@@ -294,7 +294,7 @@ config VERSATILE_FPGA_IRQ_NR
 config XTENSA_MX
 	bool
 	select IRQ_DOMAIN
-	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 
 config XILINX_INTC
 	bool "Xilinx Interrupt Controller IP"
@@ -322,7 +322,8 @@ config KEYSTONE_IRQ
 
 config MIPS_GIC
 	bool
-	select GENERIC_IRQ_IPI
+	select GENERIC_IRQ_IPI if SMP
+	select IRQ_DOMAIN_HIERARCHY
 	select MIPS_CM
 
 config INGENIC_IRQ
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 142a7431745f..6899e37810a8 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -216,11 +216,11 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
 		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
 		if (enabled)
 			__bcm6345_l1_mask(d);
-		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		irq_data_update_affinity(d, dest);
 		if (enabled)
 			__bcm6345_l1_unmask(d);
 	} else {
-		cpumask_copy(irq_data_get_affinity_mask(d), dest);
+		irq_data_update_affinity(d, dest);
 	}
 	raw_spin_unlock_irqrestore(&intc->lock, flags);
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index ff89b36267dd..8a9efb6ae587 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -52,13 +52,15 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
 
 static DEFINE_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
-static struct irq_domain *gic_ipi_domain;
 static int gic_shared_intrs;
 static unsigned int gic_cpu_pin;
 static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
 static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
 static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 static struct gic_all_vpes_chip_data {
 	u32	map;
@@ -472,9 +474,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	u32 map;
 
 	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+#ifdef CONFIG_GENERIC_IRQ_IPI
 		/* verify that shared irqs don't conflict with an IPI irq */
 		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
 			return -EBUSY;
+#endif /* CONFIG_GENERIC_IRQ_IPI */
 
 		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
 						    &gic_level_irq_controller,
@@ -567,6 +571,8 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
 	.map = gic_irq_domain_map,
 };
 
+#ifdef CONFIG_GENERIC_IRQ_IPI
+
 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
 				const u32 *intspec, unsigned int intsize,
 				irq_hw_number_t *out_hwirq,
@@ -670,6 +676,48 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
 	.match = gic_ipi_domain_match,
 };
 
+static int gic_register_ipi_domain(struct device_node *node)
+{
+	struct irq_domain *gic_ipi_domain;
+	unsigned int v[2], num_ipis;
+
+	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+						  node, &gic_ipi_domain_ops, NULL);
+	if (!gic_ipi_domain) {
+		pr_err("Failed to add IPI domain");
+		return -ENXIO;
+	}
+
+	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
+
+	if (node &&
+	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+		bitmap_set(ipi_resrv, v[0], v[1]);
+	} else {
+		/*
+		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+		 * meeting the requirements of arch/mips SMP.
+		 */
+		num_ipis = 2 * num_possible_cpus();
+		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
+	}
+
+	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+
+	return 0;
+}
+
+#else /* !CONFIG_GENERIC_IRQ_IPI */
+
+static inline int gic_register_ipi_domain(struct device_node *node)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_GENERIC_IRQ_IPI */
+
 static int gic_cpu_startup(unsigned int cpu)
 {
 	/* Enable or disable EIC */
@@ -688,11 +736,12 @@ static int gic_cpu_startup(unsigned int cpu)
 static int __init gic_of_init(struct device_node *node,
 			      struct device_node *parent)
 {
-	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
+	unsigned int cpu_vec, i, gicconfig;
 	unsigned long reserved;
 	phys_addr_t gic_base;
 	struct resource res;
 	size_t gic_len;
+	int ret;
 
 	/* Find the first available CPU vector. */
 	i = 0;
@@ -780,30 +829,9 @@ static int __init gic_of_init(struct device_node *node,
 		return -ENXIO;
 	}
 
-	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
-						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
-						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
-						  node, &gic_ipi_domain_ops, NULL);
-	if (!gic_ipi_domain) {
-		pr_err("Failed to add IPI domain");
-		return -ENXIO;
-	}
-
-	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
-
-	if (node &&
-	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
-		bitmap_set(ipi_resrv, v[0], v[1]);
-	} else {
-		/*
-		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
-		 * meeting the requirements of arch/mips SMP.
-		 */
-		num_ipis = 2 * num_possible_cpus();
-		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
-	}
-
-	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
+	ret = gic_register_ipi_domain(node);
+	if (ret)
+		return ret;
 
 	board_bind_eic_interrupt = &gic_bind_eic_interrupt;
 
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 8a3b0c3a1e92..3a8c98615634 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -677,7 +677,7 @@ static int iosapic_set_affinity_irq(struct irq_data *d,
 	if (dest_cpu < 0)
 		return -1;
 
-	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
+	irq_data_update_affinity(d, cpumask_of(dest_cpu));
 	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
 
 	spin_lock_irqsave(&iosapic_lock, flags);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index db814f7b93ba..e7c6f6629e7c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -642,7 +642,7 @@ static void hv_arch_irq_unmask(struct irq_data *data)
 	struct hv_retarget_device_interrupt *params;
 	struct tran_int_desc *int_desc;
 	struct hv_pcibus_device *hbus;
-	struct cpumask *dest;
+	const struct cpumask *dest;
 	cpumask_var_t tmp;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
@@ -1613,7 +1613,7 @@ out:
 }
 
 static u32 hv_compose_msi_req_v1(
-	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
 	u32 slot, u8 vector, u8 vector_count)
 {
 	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1635,13 +1635,13 @@ static u32 hv_compose_msi_req_v1(
  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
  * by subsequent retarget in hv_irq_unmask().
  */
-static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
+static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
 {
 	return cpumask_first_and(affinity, cpu_online_mask);
 }
 
 static u32 hv_compose_msi_req_v2(
-	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
 	u32 slot, u8 vector, u8 vector_count)
 {
 	int cpu;
@@ -1660,7 +1660,7 @@ static u32 hv_compose_msi_req_v2(
 }
 
 static u32 hv_compose_msi_req_v3(
-	struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
+	struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
 	u32 slot, u32 vector, u8 vector_count)
 {
 	int cpu;
@@ -1697,7 +1697,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct hv_pci_dev *hpdev;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	struct cpumask *dest;
+	const struct cpumask *dest;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
 	struct msi_desc *msi_desc;
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 358df7510186..828d81e02b37 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -72,7 +72,7 @@ static int intc_set_affinity(struct irq_data *data,
 	if (!cpumask_intersects(cpumask, cpu_online_mask))
 		return -1;
 
-	cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
+	irq_data_update_affinity(data, cpumask);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 46d9295d9a6e..5e8321f43cbd 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -528,9 +528,10 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
 	BUG_ON(irq == -1);
 
 	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
-		cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-		cpumask_copy(irq_get_effective_affinity_mask(irq),
-			     cpumask_of(cpu));
+		struct irq_data *data = irq_get_irq_data(irq);
+
+		irq_data_update_affinity(data, cpumask_of(cpu));
+		irq_data_update_effective_affinity(data, cpumask_of(cpu));
 	}
 
 	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 505308253d23..996e22744edd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -151,7 +151,9 @@ struct irq_common_data {
 #endif
 	void			*handler_data;
 	struct msi_desc		*msi_desc;
+#ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
+#endif
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 	cpumask_var_t		effective_affinity;
 #endif
@@ -879,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d)
 	return irq_common_data_get_node(d->common);
 }
 
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+{
+#ifdef CONFIG_SMP
+	return d->common->affinity;
+#else
+	return cpumask_of(0);
+#endif
+}
+
+static inline void irq_data_update_affinity(struct irq_data *d,
+					    const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+	cpumask_copy(d->common->affinity, m);
+#endif
+}
+
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 
-	return d ? d->common->affinity : NULL;
-}
-
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
-{
-	return d->common->affinity;
+	return d ? irq_data_get_affinity_mask(d) : NULL;
 }
 
 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
 	return d->common->effective_affinity;
 }
@@ -908,13 +923,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
 {
 }
 static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 {
-	return d->common->affinity;
+	return irq_data_get_affinity_mask(d);
 }
 #endif
 
-static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 10929eda9825..db3d174c53d4 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -24,6 +24,7 @@ config GENERIC_IRQ_SHOW_LEVEL
 
 # Supports effective affinity mask
 config GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	depends on SMP
 	bool
 
 # Support for delayed migration from interrupt context
@@ -82,6 +83,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool
+	depends on SMP
 	select IRQ_DOMAIN_HIERARCHY
 
 # Generic MSI interrupt support
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 886789dcee43..9c7ad2266317 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -188,7 +188,8 @@ enum {
 
 #ifdef CONFIG_SMP
 static int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 
@@ -224,7 +225,8 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 }
 #else
 static __always_inline int
-__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
+		      bool force)
 {
 	return IRQ_STARTUP_NORMAL;
 }
@@ -252,7 +254,7 @@ static int __irq_startup(struct irq_desc *desc)
 int irq_startup(struct irq_desc *desc, bool resend, bool force)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	struct cpumask *aff = irq_data_get_affinity_mask(d);
+	const struct cpumask *aff = irq_data_get_affinity_mask(d);
 	int ret = 0;
 
 	desc->depth = 0;
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index bc8e40cf2b65..bbcaac64038e 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -30,7 +30,7 @@ static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
 static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct cpumask *msk;
+	const struct cpumask *msk;
 
 	msk = irq_data_get_affinity_mask(data);
 	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 08ce7da3b57c..bbd945bacef0 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -115,11 +115,11 @@ free_descs:
 int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 	struct irq_domain *domain;
 	unsigned int nr_irqs;
 
-	if (!irq || !data || !ipimask)
+	if (!irq || !data)
 		return -EINVAL;
 
 	domain = data->domain;
@@ -131,7 +131,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 		return -EINVAL;
 	}
 
-	if (WARN_ON(!cpumask_subset(dest, ipimask)))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
 		/*
 		 * Must be destroying a subset of CPUs to which this IPI
 		 * was set up to target
@@ -162,12 +163,13 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+	const struct cpumask *ipimask;
 
-	if (!data || !ipimask || cpu >= nr_cpu_ids)
+	if (!data || cpu >= nr_cpu_ids)
 		return INVALID_HWIRQ;
 
-	if (!cpumask_test_cpu(cpu, ipimask))
+	ipimask = irq_data_get_affinity_mask(data);
+	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
 		return INVALID_HWIRQ;
 
 	/*
@@ -186,7 +188,7 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
 static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
 			   const struct cpumask *dest, unsigned int cpu)
 {
-	struct cpumask *ipimask = irq_data_get_affinity_mask(data);
+	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
 
 	if (!chip || !ipimask)
 		return -EINVAL;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8c396319d5ac..40fe7806cc8c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -205,16 +205,8 @@ static void irq_validate_effective_affinity(struct irq_data *data)
 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 		     chip->name, data->irq);
 }
-
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask)
-{
-	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
-}
 #else
 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
-static inline void irq_init_effective_affinity(struct irq_data *data,
-					       const struct cpumask *mask) { }
 #endif
 
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -347,7 +339,7 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 		return false;
 
 	cpumask_copy(desc->irq_common_data.affinity, mask);
-	irq_init_effective_affinity(data, mask);
+	irq_data_update_effective_affinity(data, mask);
 	irqd_set(data, IRQD_AFFINITY_SET);
 	return true;
 }
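
Usage note (not part of the diff above): a minimal sketch of how an irqchip
->irq_set_affinity() callback could use the helpers introduced in
include/linux/irq.h, loosely following the intc_set_affinity() and
bcm6345_l1_set_affinity() changes in this series. The example_set_affinity()
function and the example_hw_route() hook are hypothetical; everything else is
existing kernel API.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>

/* Hypothetical driver-specific routing hook (assumption, not real API). */
extern void example_hw_route(irq_hw_number_t hwirq, unsigned int cpu);

static int example_set_affinity(struct irq_data *d,
				const struct cpumask *dest, bool force)
{
	unsigned int cpu = cpumask_first_and(dest, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Program the hardware to deliver this interrupt to the chosen CPU. */
	example_hw_route(irqd_to_hwirq(d), cpu);

	/*
	 * Record the affinity through the new helpers instead of writing the
	 * mask directly: on !SMP builds irq_data_update_affinity() is a no-op
	 * and irq_data_get_affinity_mask() returns cpumask_of(0), so the
	 * driver needs no #ifdef CONFIG_SMP here.
	 */
	irq_data_update_affinity(d, cpumask_of(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* The callback recorded the mask itself, so the core must not copy it. */
	return IRQ_SET_MASK_OK_NOCOPY;
}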