Merge branch 'irq/core' into x86/apic

Pick up the dependencies for the vector management rework series.
Thomas Gleixner 2017-09-25 20:39:01 +02:00
commit e4ae4c8ea7
25 changed files with 855 additions and 61 deletions


@@ -41,8 +41,8 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg);
extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
extern void mp_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irq_data);
extern int mp_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early);
extern void mp_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irq_data);
extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);


@@ -112,8 +112,8 @@ static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
static void htirq_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data)
static int htirq_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
struct ht_irq_msg msg;
struct irq_cfg *cfg = irqd_cfg(irq_data);
@@ -132,6 +132,7 @@ static void htirq_domain_activate(struct irq_domain *domain,
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
write_ht_irq_msg(irq_data->irq, &msg);
return 0;
}
static void htirq_domain_deactivate(struct irq_domain *domain,


@@ -2137,7 +2137,7 @@ static inline void __init check_timer(void)
unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
irq_domain_activate_irq(irq_data, false);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(0, pin1);
@@ -2159,7 +2159,7 @@ static inline void __init check_timer(void)
*/
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
irq_domain_activate_irq(irq_data, false);
legacy_pic->unmask(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -3018,8 +3018,8 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
void mp_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irq_data)
int mp_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
unsigned long flags;
struct irq_pin_list *entry;
@@ -3029,6 +3029,7 @@ void mp_irqdomain_activate(struct irq_domain *domain,
for_each_irq_pin(entry, data->irq_2_pin)
__ioapic_write_entry(entry->apic, entry->pin, data->entry);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
}
void mp_irqdomain_deactivate(struct irq_domain *domain,


@@ -127,10 +127,11 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
*/
static void uv_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data)
static int uv_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
return 0;
}
/*


@@ -140,8 +140,9 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
return irq_create_fwspec_mapping(&fwspec);
}
static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
struct irq_data *irq_data)
static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
struct irq_data *irq_data,
bool early)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
@@ -150,11 +151,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
return;
return -ENOSPC;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
return 0;
}
static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,


@@ -4170,8 +4170,8 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
static void irq_remapping_activate(struct irq_domain *domain,
struct irq_data *irq_data)
static int irq_remapping_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
@@ -4180,6 +4180,7 @@ static void irq_remapping_activate(struct irq_domain *domain,
if (iommu)
iommu->irte_ops->activate(data->entry, irte_info->devid,
irte_info->index);
return 0;
}
static void irq_remapping_deactivate(struct irq_domain *domain,


@@ -1389,12 +1389,13 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
static void intel_irq_remapping_activate(struct irq_domain *domain,
struct irq_data *irq_data)
static int intel_irq_remapping_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
struct intel_ir_data *data = irq_data->chip_data;
modify_irte(&data->irq_2_iommu, &data->irte_entry);
return 0;
}
static void intel_irq_remapping_deactivate(struct irq_domain *domain,


@@ -2186,8 +2186,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
static void its_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d)
static int its_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d, bool early)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2205,6 +2205,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
return 0;
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -2678,8 +2679,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
return err;
}
static void its_vpe_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d)
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
struct irq_data *d, bool early)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
@@ -2687,6 +2688,7 @@ static void its_vpe_irq_domain_activate(struct irq_domain *domain,
vpe->col_idx = cpumask_first(cpu_online_mask);
its_send_vmapp(vpe, true);
its_send_vinvall(vpe);
return 0;
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,


@@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
return 0;
}
static void stm32_gpio_domain_activate(struct irq_domain *d,
struct irq_data *irq_data)
static int stm32_gpio_domain_activate(struct irq_domain *d,
struct irq_data *irq_data, bool early)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
return 0;
}
static int stm32_gpio_domain_alloc(struct irq_domain *d,


@@ -1113,6 +1113,28 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
struct irq_matrix;
struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
unsigned int alloc_start,
unsigned int alloc_end);
void irq_matrix_online(struct irq_matrix *m);
void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu);
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
unsigned int bit, bool managed);
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
unsigned int irq_matrix_allocated(struct irq_matrix *m);
unsigned int irq_matrix_reserved(struct irq_matrix *m);
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
#define INVALID_HWIRQ (~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
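
The block of irq_matrix declarations above is the entire public surface of the new matrix allocator. A minimal consumer sketch follows, assuming a hypothetical vector-style matrix: MY_MATRIX_BITS, MY_FIRST_VECTOR, MY_LAST_VECTOR, MY_SYSTEM_VECTOR and the my_* helpers are placeholders, not kernel symbols, and callers are expected to serialize access themselves (the allocator takes no locks):

	#include <linux/irq.h>

	static struct irq_matrix *my_matrix;

	/* Boot time: create the matrix and carve out system-owned bits */
	static int __init my_matrix_init(void)
	{
		my_matrix = irq_alloc_matrix(MY_MATRIX_BITS, MY_FIRST_VECTOR,
					     MY_LAST_VECTOR + 1);
		if (!my_matrix)
			return -ENOMEM;
		/* Bit owned by the system, excluded from allocation */
		irq_matrix_assign_system(my_matrix, MY_SYSTEM_VECTOR, false);
		return 0;
	}

	/* CPU hotplug: runs on the CPU which is coming online */
	static void my_matrix_cpu_online(void)
	{
		irq_matrix_online(my_matrix);
	}

	/* Allocate one bit on some online CPU in @msk; returns the bit
	 * number or -ENOSPC, and stores the chosen CPU in *cpu.
	 */
	static int my_alloc_vector(const struct cpumask *msk, unsigned int *cpu)
	{
		return irq_matrix_alloc(my_matrix, msk, false, cpu);
	}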


@@ -93,6 +93,7 @@ struct irq_desc {
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
struct dentry *debugfs_file;
const char *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;


@@ -40,6 +40,7 @@ struct of_device_id;
struct irq_chip;
struct irq_data;
struct cpumask;
struct seq_file;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -104,18 +105,21 @@ struct irq_domain_ops {
int (*xlate)(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
int (*alloc)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *arg);
void (*free)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs);
void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void (*debug_show)(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind);
#endif
};
extern struct irq_domain_ops irq_generic_chip_ops;
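
The ->activate callback now returns an error code and takes an 'early' argument: the early MSI activation path calls irq_domain_activate_irq() with early=true at allocation time, while startup activates with early=false. Most callbacks converted in this merge ignore the flag; below is a hedged sketch of one plausible split, where my_reserve_resources() and my_program_hw() are placeholder helpers, not existing kernel functions:

	static int my_domain_activate(struct irq_domain *d, struct irq_data *irqd,
				      bool early)
	{
		/* Reserving resources can fail; ->deactivate undoes it */
		int ret = my_reserve_resources(irqd);

		if (ret || early)
			return ret;
		/* Real startup: program the hardware */
		my_program_hw(irqd);
		return 0;
	}

On failure the core rolls back the already activated parent domains; see the rework of __irq_domain_activate_irq() in kernel/irq/irqdomain.c below.
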
@@ -437,7 +441,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct cpumask *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern void irq_domain_activate_irq(struct irq_data *irq_data);
extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@ -507,8 +511,6 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
{
@@ -557,8 +559,6 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline struct irq_domain *irq_find_matching_fwnode(
struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
{


@@ -283,6 +283,11 @@ enum {
MSI_FLAG_PCI_MSIX = (1 << 3),
/* Needs early activate, required for PCI */
MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
/*
* Must reactivate when the irq is started, even when
* MSI_FLAG_ACTIVATE_EARLY has been set.
*/
MSI_FLAG_MUST_REACTIVATE = (1 << 5),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
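
A domain which needs early activation for PCI but whose resources are only finally assigned at startup combines both flags in its msi_domain_info; a sketch, with my_msi_ops and my_msi_chip as placeholders:

	static struct msi_domain_info my_msi_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_MUST_REACTIVATE,
		.ops	= &my_msi_ops,
		.chip	= &my_msi_chip,
	};

With MSI_FLAG_MUST_REACTIVATE set, msi_domain_alloc_irqs() (kernel/irq/msi.c below) activates the interrupt early and then clears the activated state again, so that irq_startup() performs the real activation later.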


@@ -0,0 +1,201 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq_matrix
#if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_MATRIX_H
#include <linux/tracepoint.h>
struct irq_matrix;
struct cpumap;
DECLARE_EVENT_CLASS(irq_matrix_global,
TP_PROTO(struct irq_matrix *matrix),
TP_ARGS(matrix),
TP_STRUCT__entry(
__field( unsigned int, online_maps )
__field( unsigned int, global_available )
__field( unsigned int, global_reserved )
__field( unsigned int, total_allocated )
),
TP_fast_assign(
__entry->online_maps = matrix->online_maps;
__entry->global_available = matrix->global_available;
__entry->global_reserved = matrix->global_reserved;
__entry->total_allocated = matrix->total_allocated;
),
TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
__entry->online_maps, __entry->global_available,
__entry->global_reserved, __entry->total_allocated)
);
DECLARE_EVENT_CLASS(irq_matrix_global_update,
TP_PROTO(int bit, struct irq_matrix *matrix),
TP_ARGS(bit, matrix),
TP_STRUCT__entry(
__field( int, bit )
__field( unsigned int, online_maps )
__field( unsigned int, global_available )
__field( unsigned int, global_reserved )
__field( unsigned int, total_allocated )
),
TP_fast_assign(
__entry->bit = bit;
__entry->online_maps = matrix->online_maps;
__entry->global_available = matrix->global_available;
__entry->global_reserved = matrix->global_reserved;
__entry->total_allocated = matrix->total_allocated;
),
TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
__entry->bit, __entry->online_maps,
__entry->global_available, __entry->global_reserved,
__entry->total_allocated)
);
DECLARE_EVENT_CLASS(irq_matrix_cpu,
TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap),
TP_STRUCT__entry(
__field( int, bit )
__field( unsigned int, cpu )
__field( bool, online )
__field( unsigned int, available )
__field( unsigned int, allocated )
__field( unsigned int, managed )
__field( unsigned int, online_maps )
__field( unsigned int, global_available )
__field( unsigned int, global_reserved )
__field( unsigned int, total_allocated )
),
TP_fast_assign(
__entry->bit = bit;
__entry->cpu = cpu;
__entry->online = cmap->online;
__entry->available = cmap->available;
__entry->allocated = cmap->allocated;
__entry->managed = cmap->managed;
__entry->online_maps = matrix->online_maps;
__entry->global_available = matrix->global_available;
__entry->global_reserved = matrix->global_reserved;
__entry->total_allocated = matrix->total_allocated;
),
TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u",
__entry->bit, __entry->cpu, __entry->online,
__entry->available, __entry->allocated,
__entry->managed, __entry->online_maps,
__entry->global_available, __entry->global_reserved,
__entry->total_allocated)
);
DEFINE_EVENT(irq_matrix_global, irq_matrix_online,
TP_PROTO(struct irq_matrix *matrix),
TP_ARGS(matrix)
);
DEFINE_EVENT(irq_matrix_global, irq_matrix_offline,
TP_PROTO(struct irq_matrix *matrix),
TP_ARGS(matrix)
);
DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve,
TP_PROTO(struct irq_matrix *matrix),
TP_ARGS(matrix)
);
DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved,
TP_PROTO(struct irq_matrix *matrix),
TP_ARGS(matrix)
);
DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
TP_PROTO(int bit, struct irq_matrix *matrix),
TP_ARGS(bit, matrix)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free,
TP_PROTO(int bit, unsigned int cpu,
struct irq_matrix *matrix, struct cpumap *cmap),
TP_ARGS(bit, cpu, matrix, cmap)
);
#endif /* _TRACE_IRQ_MATRIX_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@@ -97,6 +97,9 @@ config HANDLE_DOMAIN_IRQ
config IRQ_TIMINGS
bool
config GENERIC_IRQ_MATRIX_ALLOCATOR
bool
config IRQ_DOMAIN_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs"
depends on IRQ_DOMAIN && DEBUG_FS
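
GENERIC_IRQ_MATRIX_ALLOCATOR is a hidden bool, so an architecture opts in by selecting it. This merge only adds the infrastructure; the x86 vector rework it prepares for is expected to wire it up roughly like this (sketch, not part of this commit):

	config X86
		select GENERIC_IRQ_MATRIX_ALLOCATOR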


@@ -13,3 +13,4 @@ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
obj-$(CONFIG_SMP) += affinity.o
obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o


@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
irq_activate_and_startup(desc, IRQ_NORESEND);
}
raw_spin_unlock_irq(&desc->lock);
}


@@ -207,20 +207,24 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
* Catch code which fiddles with enable_irq() on a managed
* and potentially shutdown IRQ. Chained interrupt
* installment or irq auto probing should not happen on
* managed irqs either. Emit a warning, break the affinity
* and start it up as a normal interrupt.
* managed irqs either.
*/
if (WARN_ON_ONCE(force))
return IRQ_STARTUP_NORMAL;
return IRQ_STARTUP_ABORT;
/*
* The interrupt was requested, but there is no online CPU
* in its affinity mask. Put it into managed shutdown
* state and let the cpu hotplug mechanism start it up once
* a CPU in the mask becomes available.
*/
irqd_set_managed_shutdown(d);
return IRQ_STARTUP_ABORT;
}
/*
* Managed interrupts have reserved resources, so this should not
* happen.
*/
if (WARN_ON(irq_domain_activate_irq(d, false)))
return IRQ_STARTUP_ABORT;
return IRQ_STARTUP_MANAGED;
}
#else
@@ -236,7 +240,9 @@ static int __irq_startup(struct irq_desc *desc)
struct irq_data *d = irq_desc_get_irq_data(desc);
int ret = 0;
irq_domain_activate_irq(d);
/* Warn if this interrupt is not activated but try nevertheless */
WARN_ON_ONCE(!irqd_is_activated(d));
if (d->chip->irq_startup) {
ret = d->chip->irq_startup(d);
irq_state_clr_disabled(desc);
@@ -269,6 +275,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
irq_set_affinity_locked(d, aff, false);
break;
case IRQ_STARTUP_ABORT:
irqd_set_managed_shutdown(d);
return 0;
}
}
@@ -278,6 +285,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
return ret;
}
int irq_activate(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
if (!irqd_affinity_is_managed(d))
return irq_domain_activate_irq(d, false);
return 0;
}
void irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
if (WARN_ON(irq_activate(desc)))
return;
irq_startup(desc, resend, IRQ_START_FORCE);
}
static void __irq_disable(struct irq_desc *desc, bool mask);
void irq_shutdown(struct irq_desc *desc)
@@ -953,7 +976,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
desc->action = &chained_action;
irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
irq_activate_and_startup(desc, IRQ_RESEND);
}
}


@@ -81,6 +81,8 @@ irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
data->domain ? data->domain->name : "");
seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
irq_debug_show_chip(m, data, ind + 1);
if (data->domain && data->domain->ops && data->domain->ops->debug_show)
data->domain->ops->debug_show(m, NULL, data, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!data->parent_data)
return;
@@ -149,6 +151,7 @@ static int irq_debug_show(struct seq_file *m, void *p)
raw_spin_lock_irq(&desc->lock);
data = irq_desc_get_irq_data(desc);
seq_printf(m, "handler: %pf\n", desc->handle_irq);
seq_printf(m, "device: %s\n", desc->dev_name);
seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
ARRAY_SIZE(irqdesc_states));
@@ -226,6 +229,15 @@ static const struct file_operations dfs_irq_ops = {
.release = single_release,
};
void irq_debugfs_copy_devname(int irq, struct device *dev)
{
struct irq_desc *desc = irq_to_desc(irq);
const char *name = dev_name(dev);
if (name)
desc->dev_name = kstrdup(name, GFP_KERNEL);
}
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
{
char name[10];


@@ -74,6 +74,8 @@ extern void __enable_irq(struct irq_desc *desc);
#define IRQ_START_FORCE true
#define IRQ_START_COND false
extern int irq_activate(struct irq_desc *desc);
extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
extern void irq_shutdown(struct irq_desc *desc);
@@ -436,6 +438,18 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
{
irqd_set_activated(data);
return 0;
}
static inline void irq_domain_deactivate_irq(struct irq_data *data)
{
irqd_clr_activated(data);
}
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>
@@ -443,7 +457,9 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
debugfs_remove(desc->debugfs_file);
kfree(desc->dev_name);
}
void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
@@ -458,4 +474,7 @@ static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
{
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */


@@ -448,7 +448,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
}
}
flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
mask = NULL;
for (i = 0; i < cnt; i++) {
@@ -462,6 +462,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
goto err;
irq_insert_desc(start + i, desc);
irq_sysfs_add(start + i, desc);
irq_add_debugfs_entry(start + i, desc);
}
bitmap_set(allocated_irqs, start, cnt);
return start;


@@ -1682,18 +1682,6 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
static void __irq_domain_activate_irq(struct irq_data *irq_data)
{
if (irq_data && irq_data->domain) {
struct irq_domain *domain = irq_data->domain;
if (irq_data->parent_data)
__irq_domain_activate_irq(irq_data->parent_data);
if (domain->ops->activate)
domain->ops->activate(domain, irq_data);
}
}
static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
if (irq_data && irq_data->domain) {
@@ -1706,6 +1694,26 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
}
}
static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
{
int ret = 0;
if (irqd && irqd->domain) {
struct irq_domain *domain = irqd->domain;
if (irqd->parent_data)
ret = __irq_domain_activate_irq(irqd->parent_data,
early);
if (!ret && domain->ops->activate) {
ret = domain->ops->activate(domain, irqd, early);
/* Rollback in case of error */
if (ret && irqd->parent_data)
__irq_domain_deactivate_irq(irqd->parent_data);
}
}
return ret;
}
/**
* irq_domain_activate_irq - Call domain_ops->activate recursively to activate
* interrupt
@@ -1714,12 +1722,15 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
* This is the second step to call domain_ops->activate to program interrupt
* controllers, so the interrupt could actually get delivered.
*/
void irq_domain_activate_irq(struct irq_data *irq_data)
int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
{
if (!irqd_is_activated(irq_data)) {
__irq_domain_activate_irq(irq_data);
int ret = 0;
if (!irqd_is_activated(irq_data))
ret = __irq_domain_activate_irq(irq_data, early);
if (!ret)
irqd_set_activated(irq_data);
}
return ret;
}
/**
@@ -1810,6 +1821,8 @@ irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
d->revmap_size + d->revmap_direct_max_irq);
seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
if (d->ops && d->ops->debug_show)
d->ops->debug_show(m, d, NULL, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!d->parent)
return;


@@ -519,7 +519,7 @@ void __enable_irq(struct irq_desc *desc)
* time. If it was already started up, then irq_startup()
* will invoke irq_enable() under the hood.
*/
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
break;
}
default:
@@ -1325,6 +1325,21 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
goto out_unlock;
}
/*
* Activate the interrupt. That activation must happen
* independently of IRQ_NOAUTOEN. request_irq() can fail
* and the callers are supposed to handle
* that. enable_irq() of an interrupt requested with
* IRQ_NOAUTOEN is not supposed to fail. The activation
* keeps it in shutdown mode; it merely associates
* resources if necessary and if that's not possible it
* fails. Interrupts which are in managed shutdown mode
* will simply ignore that activation request.
*/
ret = irq_activate(desc);
if (ret)
goto out_unlock;
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
IRQS_ONESHOT | IRQS_WAITING);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
@@ -1400,7 +1415,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
wake_up_process(new->secondary->thread);
register_irq_proc(irq, desc);
irq_add_debugfs_entry(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
return 0;
@@ -1643,6 +1657,10 @@ const void *free_irq(unsigned int irq, void *dev_id)
#endif
action = __free_irq(irq, dev_id);
if (!action)
return NULL;
devname = action->name;
kfree(action);
return devname;

kernel/irq/matrix.c (new file, 443 lines)

@@ -0,0 +1,443 @@
/*
* Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS))
struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
unsigned long managed_map[IRQ_MATRIX_SIZE];
};
struct irq_matrix {
unsigned int matrix_bits;
unsigned int alloc_start;
unsigned int alloc_end;
unsigned int alloc_size;
unsigned int global_available;
unsigned int global_reserved;
unsigned int systembits_inalloc;
unsigned int total_allocated;
unsigned int online_maps;
struct cpumap __percpu *maps;
unsigned long scratch_map[IRQ_MATRIX_SIZE];
unsigned long system_map[IRQ_MATRIX_SIZE];
};
#define CREATE_TRACE_POINTS
#include <trace/events/irq_matrix.h>
/**
* irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
* @matrix_bits: Number of matrix bits, must be <= IRQ_MATRIX_BITS
* @alloc_start: From which bit the allocation search starts
* @alloc_end: At which bit the allocation search ends, i.e. the first
* invalid bit
*/
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
unsigned int alloc_start,
unsigned int alloc_end)
{
struct irq_matrix *m;
if (matrix_bits > IRQ_MATRIX_BITS)
return NULL;
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return NULL;
m->matrix_bits = matrix_bits;
m->alloc_start = alloc_start;
m->alloc_end = alloc_end;
m->alloc_size = alloc_end - alloc_start;
m->maps = alloc_percpu(*m->maps);
if (!m->maps) {
kfree(m);
return NULL;
}
return m;
}
/**
* irq_matrix_online - Bring the local CPU matrix online
* @m: Matrix pointer
*/
void irq_matrix_online(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
BUG_ON(cm->online);
bitmap_zero(cm->alloc_map, m->matrix_bits);
cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
cm->allocated = 0;
m->global_available += cm->available;
cm->online = true;
m->online_maps++;
trace_irq_matrix_online(m);
}
/**
* irq_matrix_offline - Bring the local CPU matrix offline
* @m: Matrix pointer
*/
void irq_matrix_offline(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
/* Update the global available size */
m->global_available -= cm->available;
cm->online = false;
m->online_maps--;
trace_irq_matrix_offline(m);
}
static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
unsigned int num, bool managed)
{
unsigned int area, start = m->alloc_start;
unsigned int end = m->alloc_end;
bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
if (area >= end)
return area;
if (managed)
bitmap_set(cm->managed_map, area, num);
else
bitmap_set(cm->alloc_map, area, num);
return area;
}
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
* @bit: Which bit to reserve
* @replace: Replace an already allocated vector with a system
* vector at the same bit position.
*
* The BUG_ON()s below are on purpose. If this goes wrong in the
* early boot process, then the chance to survive is about zero.
* If this happens when the system is live, it's not much better.
*/
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
bool replace)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
BUG_ON(bit > m->matrix_bits);
BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
set_bit(bit, m->system_map);
if (replace) {
BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
cm->allocated--;
m->total_allocated--;
}
if (bit >= m->alloc_start && bit < m->alloc_end)
m->systembits_inalloc++;
trace_irq_matrix_assign_system(bit, m);
}
/**
* irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
* @m: Matrix pointer
* @msk: On which CPUs the bits should be reserved.
*
* Can be called for offline CPUs. Note, this will only reserve one bit
* on all CPUs in @msk, but it's not guaranteed that the bits are at the
* same offset on all CPUs
*/
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
unsigned int cpu, failed_cpu;
for_each_cpu(cpu, msk) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit;
bit = matrix_alloc_area(m, cm, 1, true);
if (bit >= m->alloc_end)
goto cleanup;
cm->managed++;
if (cm->online) {
cm->available--;
m->global_available--;
}
trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
}
return 0;
cleanup:
failed_cpu = cpu;
for_each_cpu(cpu, msk) {
if (cpu == failed_cpu)
break;
irq_matrix_remove_managed(m, cpumask_of(cpu));
}
return -ENOSPC;
}
/**
* irq_matrix_remove_managed - Remove managed interrupts in a CPU map
* @m: Matrix pointer
* @msk: On which CPUs the bits should be removed
*
* Can be called for offline CPUs
*
* This removes unallocated managed interrupts from the map. It does
* not matter which one, because the managed interrupts free their
* allocation when they shut down. If not, the accounting is screwed,
* but all that can be done at this point is warn about it.
*/
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
unsigned int cpu;
for_each_cpu(cpu, msk) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit, end = m->alloc_end;
if (WARN_ON_ONCE(!cm->managed))
continue;
/* Get the managed bits which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
if (WARN_ON_ONCE(bit >= end))
continue;
clear_bit(bit, cm->managed_map);
cm->managed--;
if (cm->online) {
cm->available++;
m->global_available++;
}
trace_irq_matrix_remove_managed(bit, cpu, m, cm);
}
}
/**
* irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
* @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated
*/
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
{
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit, end = m->alloc_end;
/* Get the managed bits which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
if (bit >= end)
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
m->total_allocated++;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit;
}
/**
* irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
* @m: Matrix pointer
* @bit: Which bit to mark
*
* This should only be used to mark preallocated vectors
*/
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
return;
cm->allocated++;
m->total_allocated++;
cm->available--;
m->global_available--;
trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
/**
* irq_matrix_reserve - Reserve interrupts
* @m: Matrix pointer
*
* This is merely a bookkeeping call. It increments the number of globally
* reserved interrupt bits without actually allocating them. This allows
* interrupt descriptors to be set up without assigning low level resources
* to them. The actual allocation happens when the interrupt gets activated.
*/
void irq_matrix_reserve(struct irq_matrix *m)
{
if (m->global_reserved <= m->global_available &&
m->global_reserved + 1 > m->global_available)
pr_warn("Interrupt reservation exceeds available resources\n");
m->global_reserved++;
trace_irq_matrix_reserve(m);
}
/**
* irq_matrix_remove_reserved - Remove interrupt reservation
* @m: Matrix pointer
*
* This is merely a bookkeeping call. It decrements the number of globally
* reserved interrupt bits. This is used to undo irq_matrix_reserve() when
* the interrupt was never in use, or after a real vector was allocated,
* which undid the reservation.
*/
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
m->global_reserved--;
trace_irq_matrix_remove_reserved(m);
}
/**
* irq_matrix_alloc - Allocate a regular interrupt in a CPU map
* @m: Matrix pointer
* @msk: Which CPUs to search in
* @reserved: Allocate previously reserved interrupts
* @mapped_cpu: Pointer to store the CPU for which the irq was allocated
*/
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
unsigned int cpu;
for_each_cpu(cpu, msk) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
unsigned int bit;
if (!cm->online)
continue;
bit = matrix_alloc_area(m, cm, 1, false);
if (bit < m->alloc_end) {
cm->allocated++;
cm->available--;
m->total_allocated++;
m->global_available--;
if (reserved)
m->global_reserved--;
*mapped_cpu = cpu;
trace_irq_matrix_alloc(bit, cpu, m, cm);
return bit;
}
}
return -ENOSPC;
}
/**
* irq_matrix_free - Free allocated interrupt in the matrix
* @m: Matrix pointer
* @cpu: Which CPU map needs to be updated
* @bit: The bit to remove
* @managed: If true, the interrupt is managed and not accounted
* as available.
*/
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
unsigned int bit, bool managed)
{
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
return;
if (cm->online) {
clear_bit(bit, cm->alloc_map);
cm->allocated--;
m->total_allocated--;
if (!managed) {
cm->available++;
m->global_available++;
}
}
trace_irq_matrix_free(bit, cpu, m, cm);
}
/**
* irq_matrix_available - Get the number of globally available irqs
* @m: Pointer to the matrix to query
* @cpudown: If true, the local CPU is about to go down, adjust
* the number of available irqs accordingly
*/
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
return m->global_available - (cpudown ? cm->available : 0);
}
/**
* irq_matrix_reserved - Get the number of globally reserved irqs
* @m: Pointer to the matrix to query
*/
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
return m->global_reserved;
}
/**
* irq_matrix_allocated - Get the number of allocated irqs on the local cpu
* @m: Pointer to the matrix to search
*
* This returns the number of allocated irqs.
*/
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
struct cpumap *cm = this_cpu_ptr(m->maps);
return cm->allocated;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
* irq_matrix_debug_show - Show detailed allocation information
* @sf: Pointer to the seq_file to print to
* @m: Pointer to the matrix allocator
* @ind: Indentation for the print format
*
* Note, this is a lockless snapshot.
*/
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
int cpu;
seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
seq_printf(sf, "Global available: %6u\n", m->global_available);
seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
cpu, cm->available, cm->managed, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
}
#endif


@@ -16,6 +16,8 @@
#include <linux/msi.h>
#include <linux/slab.h>
#include "internals.h"
/**
* alloc_msi_entry - Allocate and initialize an msi_entry
* @dev: Pointer to the device for which this is allocated
@@ -100,13 +102,14 @@ int msi_domain_set_affinity(struct irq_data *irq_data,
return ret;
}
static void msi_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data)
static int msi_domain_activate(struct irq_domain *domain,
struct irq_data *irq_data, bool early)
{
struct msi_msg msg;
BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
irq_chip_write_msi_msg(irq_data, &msg);
return 0;
}
static void msi_domain_deactivate(struct irq_domain *domain,
@@ -373,8 +376,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
return ret;
}
for (i = 0; i < desc->nvec_used; i++)
for (i = 0; i < desc->nvec_used; i++) {
irq_set_msi_desc_off(virq, i, desc);
irq_debugfs_copy_devname(virq + i, dev);
}
}
if (ops->msi_finish)
@@ -396,11 +401,28 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct irq_data *irq_data;
irq_data = irq_domain_get_irq_data(domain, desc->irq);
irq_domain_activate_irq(irq_data);
ret = irq_domain_activate_irq(irq_data, true);
if (ret)
goto cleanup;
if (info->flags & MSI_FLAG_MUST_REACTIVATE)
irqd_clr_activated(irq_data);
}
}
return 0;
cleanup:
for_each_msi_entry(desc, dev) {
struct irq_data *irqd;
if (desc->irq == virq)
break;
irqd = irq_domain_get_irq_data(domain, desc->irq);
if (irqd_is_activated(irqd))
irq_domain_deactivate_irq(irqd);
}
msi_domain_free_irqs(domain, dev);
return ret;
}
/**