irqchip updates for Linux 5.5

 - Qualcomm PDC wakeup interrupt support
 - Layerscape external IRQ support
 - Broadcom bcm7038 PM and wakeup support
 - Ingenic driver cleanup and modernization
 - GICv3 ITS preparation for GICv4.1 updates
 - GICv4 fixes
 - Various cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl3VJasPHG1hekBrZXJu
 ZWwub3JnAAoJECPQ0LrRPXpDMLEP/2U55GLmPuNqW/na4YFmcOYAkoP0WpEKDzn9
 u8lBi8CukKl1Z2JLAyP0E1e5iOVS0exvQ5V4OwxGKmeR9oWmB3Lym8UWRw7vcKEF
 HMKgtPCd3U3J5jW0P4Hr8hn6Q+B55fdMrrAaJcgsfBVB7bRB0lC0LZYGtN4VC4d8
 rTQzup5CK8Mu9k4NztLCxxoBUKFoqM+ZKsrRB2eOXB9amcPQtFvwC+5ZL2tDr3wS
 7d+pd6G4A+hsloIDUxoH9BrO/jd1jlfHyBRDFJIgpo/IQWVT6ciQECZomRR1pW30
 bGFYBf/HPFqfyH+ZOrWprSAd0Yx33WtYaMokYaJ6vGu4wedyxh/1LTRLzL0tWuyZ
 tPFvEmiiP/Hoeq1JHFRFQUO/75ckqALLeAxCjACCN8+F2Z0armk1W/iwehZNQHHV
 JdDXegRNUlMipG2kk5D3L6AK28bi+3+axc1ERMN1RO40eLm8NLogWL2TJlxLbyUe
 lMZMe43ceC0McGnQpAY8qyuC7IycQtngKNBvzG+6ADucGpFez3gYxh39RR43XMVo
 37Hsj+Ur7CFBJj6WTCzV2teC/WaXXQkJYxn6fsHNmUgdwPgGD3LppxhlWG49Ao9w
 x8ZnfyrrYmcFOJrKbT45ExMihioaGf8dyksKZNA/Z4dI0g/kf0LyYi5ujZaDDilI
 eDkMI/xI
 =uENR
 -----END PGP SIGNATURE-----

Merge tag 'irqchip-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

 - Qualcomm PDC wakeup interrupt support
 - Layerscape external IRQ support
 - Broadcom bcm7038 PM and wakeup support
 - Ingenic driver cleanup and modernization
 - GICv3 ITS preparation for GICv4.1 updates
 - GICv4 fixes
 - Various cleanups
Merged by Thomas Gleixner on 2019-11-20 14:16:34 +01:00 (commit 407e62f52a).
24 changed files with 1041 additions and 160 deletions.


@@ -31,6 +31,17 @@ Required properties:
 - interrupts: specifies the interrupt line(s) in the interrupt-parent controller
   node; valid values depend on the type of parent interrupt controller

+Optional properties:
+
+- brcm,irq-can-wake: If present, this means the L1 controller can be used as a
+  wakeup source for system suspend/resume.
+
+Optional properties:
+
+- brcm,int-fwd-mask: if present, a bit mask to indicate which interrupts
+  have already been configured by the firmware and should be left unmanaged.
+  This should have one 32-bit word per status/set/clear/mask group.
+
 If multiple reg ranges and interrupt-parent entries are present on an SMP
 system, the driver will allow IRQ SMP affinity to be set up through the
 /proc/irq/ interface. In the simplest possible configuration, only one
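For illustration, a hedged sketch of how the two new optional properties might
be used together. It is adapted from the binding's existing example node; the
register addresses and the mask words are placeholders, not values taken from
this patch:

	periph_intc: interrupt-controller@1041a400 {
		compatible = "brcm,bcm7038-l1-intc";
		reg = <0x1041a400 0x30>, <0x1041a600 0x30>;

		interrupt-controller;
		#interrupt-cells = <1>;

		interrupt-parent = <&cpu_intc>;
		interrupts = <2>, <3>;

		/* Allow the L1 controller to wake the system from suspend */
		brcm,irq-can-wake;

		/* One 32-bit word per status/set/clear/mask group; bits set
		 * here are owned by firmware and left unmanaged by Linux.
		 */
		brcm,int-fwd-mask = <0x0 0x0 0x0>;
	};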


@ -0,0 +1,49 @@
* Freescale Layerscape external IRQs
Some Layerscape SOCs (LS1021A, LS1043A, LS1046A) support inverting
the polarity of certain external interrupt lines.
The device node must be a child of the node representing the
Supplemental Configuration Unit (SCFG).
Required properties:
- compatible: should be "fsl,<soc-name>-extirq", e.g. "fsl,ls1021a-extirq".
- #interrupt-cells: Must be 2. The first element is the index of the
external interrupt line. The second element is the trigger type.
- #address-cells: Must be 0.
- interrupt-controller: Identifies the node as an interrupt controller
- reg: Specifies the Interrupt Polarity Control Register (INTPCR) in
the SCFG.
- interrupt-map: Specifies the mapping from external interrupts to GIC
interrupts.
- interrupt-map-mask: Must be <0xffffffff 0>.
Example:
scfg: scfg@1570000 {
compatible = "fsl,ls1021a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
big-endian;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x1570000 0x10000>;
extirq: interrupt-controller@1ac {
compatible = "fsl,ls1021a-extirq";
#interrupt-cells = <2>;
#address-cells = <0>;
interrupt-controller;
reg = <0x1ac 4>;
interrupt-map =
<0 0 &gic GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
<1 0 &gic GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
<2 0 &gic GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
<3 0 &gic GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
<4 0 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<5 0 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map-mask = <0xffffffff 0x0>;
};
};
interrupts-extended = <&gic GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
<&extirq 1 IRQ_TYPE_LEVEL_LOW>;


@@ -108,3 +108,15 @@ commonly used:
 			sensitivity = <7>;
 		};
 	};
+
+3) Interrupt wakeup parent
+--------------------------
+
+Some interrupt controllers in a SoC are always powered on and have select
+interrupts routed to them, so that they can wake up the SoC from suspend.
+These controllers do not fall into the category of a parent interrupt
+controller; they are instead referenced through the "wakeup-parent" property,
+which contains a single phandle pointing to the wakeup-capable controller.
+
+Example:
+	wakeup-parent = <&pdc_intc>;
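To make the usage a little more concrete, the following is a hedged sketch of
a consumer node; the node name, compatible string and property values are
illustrative assumptions, not part of this patch:

	tlmm: pinctrl@3400000 {
		compatible = "qcom,sdm845-pinctrl";
		reg = <0x03400000 0xc00000>;
		interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
		gpio-controller;
		#gpio-cells = <2>;
		interrupt-controller;
		#interrupt-cells = <2>;

		/* Wakeup-capable interrupt controller (e.g. the PDC) */
		wakeup-parent = <&pdc_intc>;
	};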


@@ -17,7 +17,8 @@ Properties:
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: Should contain "qcom,<soc>-pdc"
+	Definition: Should contain "qcom,<soc>-pdc" and "qcom,pdc"
+		    - "qcom,sc7180-pdc": For SC7180
 		    - "qcom,sdm845-pdc": For SDM845

 - reg:
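As a hedged illustration of the updated compatible (the unit address, reg size
and qcom,pdc-ranges values below are placeholders borrowed from a typical
SDM845 device tree, not from this patch), a PDC node would now carry both the
SoC-specific and the generic compatible:

	pdc: interrupt-controller@b220000 {
		compatible = "qcom,sdm845-pdc", "qcom,pdc";
		reg = <0xb220000 0x30000>;
		qcom,pdc-ranges = <0 480 94>, <94 609 31>, <125 63 1>;
		#interrupt-cells = <2>;
		interrupt-parent = <&intc>;
		interrupt-controller;
	};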


@@ -333,7 +333,7 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
  * GITS_VPENDBASER - the Valid bit must be cleared before changing
  * anything else.
  */
-static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
 {
 	u32 tmp;


@@ -370,6 +370,10 @@ config MVEBU_PIC
 config MVEBU_SEI
 	bool

+config LS_EXTIRQ
+	def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+	select MFD_SYSCON
+
 config LS_SCFG_MSI
 	def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
 	depends on PCI && PCI_MSI
@@ -483,8 +487,6 @@ config TI_SCI_INTA_IRQCHIP
 	  If you wish to use interrupt aggregator irq resources managed by the
 	  TI System Controller, say Y here. Otherwise, say N.

-endmenu
-
 config SIFIVE_PLIC
 	bool "SiFive Platform-Level Interrupt Controller"
 	depends on RISCV
@@ -496,3 +498,5 @@ config SIFIVE_PLIC
 	   interrupt sources are subordinate to the PLIC.

 	   If you don't know what to do here, say Y.
+
+endmenu


@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_ICU)			+= irq-mvebu-icu.o
 obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
 obj-$(CONFIG_MVEBU_PIC)			+= irq-mvebu-pic.o
 obj-$(CONFIG_MVEBU_SEI)			+= irq-mvebu-sei.o
+obj-$(CONFIG_LS_EXTIRQ)			+= irq-ls-extirq.o
 obj-$(CONFIG_LS_SCFG_MSI)		+= irq-ls-scfg-msi.o
 obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o irq-aspeed-i2c-ic.o


@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/syscore_ops.h>

 #define IRQS_PER_WORD		32
 #define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
@@ -39,6 +40,11 @@ struct bcm7038_l1_chip {
 	unsigned int		n_words;
 	struct irq_domain	*domain;
 	struct bcm7038_l1_cpu	*cpus[NR_CPUS];
+#ifdef CONFIG_PM_SLEEP
+	struct list_head	list;
+	u32			wake_mask[MAX_WORDS];
+#endif
+	u32			irq_fwd_mask[MAX_WORDS];
 	u8			affinity[MAX_WORDS * IRQS_PER_WORD];
 };
@ -249,6 +255,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
resource_size_t sz; resource_size_t sz;
struct bcm7038_l1_cpu *cpu; struct bcm7038_l1_cpu *cpu;
unsigned int i, n_words, parent_irq; unsigned int i, n_words, parent_irq;
int ret;
if (of_address_to_resource(dn, idx, &res)) if (of_address_to_resource(dn, idx, &res))
return -EINVAL; return -EINVAL;
@ -262,6 +269,14 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words) else if (intc->n_words != n_words)
return -EINVAL; return -EINVAL;
ret = of_property_read_u32_array(dn , "brcm,int-fwd-mask",
intc->irq_fwd_mask, n_words);
if (ret != 0 && ret != -EINVAL) {
/* property exists but has the wrong number of words */
pr_err("invalid brcm,int-fwd-mask property\n");
return -EINVAL;
}
cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
GFP_KERNEL); GFP_KERNEL);
if (!cpu) if (!cpu)
@@ -272,8 +287,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
 		return -ENOMEM;

 	for (i = 0; i < n_words; i++) {
-		l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
-		cpu->mask_cache[i] = 0xffffffff;
+		l1_writel(~intc->irq_fwd_mask[i],
+			  cpu->map_base + reg_mask_set(intc, i));
+		l1_writel(intc->irq_fwd_mask[i],
+			  cpu->map_base + reg_mask_clr(intc, i));
+		cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
 	}

 	parent_irq = irq_of_parse_and_map(dn, idx);
@ -281,12 +299,89 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq); pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL; return -EINVAL;
} }
if (of_property_read_bool(dn, "brcm,irq-can-wake"))
enable_irq_wake(parent_irq);
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc); intc);
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP
/*
* We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
* used because the struct chip_type suspend/resume hooks are not called
* unless chip_type is hooked onto a generic_chip. Since this driver does
* not use generic_chip, we need to manually hook our resume/suspend to
* syscore_ops.
*/
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
static int bcm7038_l1_suspend(void)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
u32 val;
/* Wakeup interrupt should only come from the boot cpu */
boot_cpu = cpu_logical_map(0);
list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
for (word = 0; word < intc->n_words; word++) {
val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
l1_writel(~val,
intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
l1_writel(val,
intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
}
}
return 0;
}
static void bcm7038_l1_resume(void)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
boot_cpu = cpu_logical_map(0);
list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
for (word = 0; word < intc->n_words; word++) {
l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
}
}
}
static struct syscore_ops bcm7038_l1_syscore_ops = {
.suspend = bcm7038_l1_suspend,
.resume = bcm7038_l1_resume,
};
static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
unsigned long flags;
u32 word = d->hwirq / IRQS_PER_WORD;
u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
raw_spin_lock_irqsave(&intc->lock, flags);
if (on)
intc->wake_mask[word] |= mask;
else
intc->wake_mask[word] &= ~mask;
raw_spin_unlock_irqrestore(&intc->lock, flags);
return 0;
}
#endif
static struct irq_chip bcm7038_l1_irq_chip = { static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1", .name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask, .irq_mask = bcm7038_l1_mask,
@ -295,11 +390,21 @@ static struct irq_chip bcm7038_l1_irq_chip = {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline, .irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif #endif
#ifdef CONFIG_PM_SLEEP
.irq_set_wake = bcm7038_l1_set_wake,
#endif
}; };
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq) irq_hw_number_t hw_irq)
{ {
struct bcm7038_l1_chip *intc = d->host_data;
u32 mask = BIT(hw_irq % IRQS_PER_WORD);
u32 word = hw_irq / IRQS_PER_WORD;
if (intc->irq_fwd_mask[word] & mask)
return -EPERM;
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data); irq_set_chip_data(virq, d->host_data);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
@ -340,6 +445,16 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap; goto out_unmap;
} }
#ifdef CONFIG_PM_SLEEP
/* Add bcm7038_l1_chip into a list */
raw_spin_lock(&bcm7038_l1_intcs_lock);
list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
raw_spin_unlock(&bcm7038_l1_intcs_lock);
if (list_is_singular(&bcm7038_l1_intcs_list))
register_syscore_ops(&bcm7038_l1_syscore_ops);
#endif
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n", pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
dn, IRQS_PER_WORD * intc->n_words); dn, IRQS_PER_WORD * intc->n_words);


@ -6,6 +6,7 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/acpi_iort.h> #include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/crash_dump.h> #include <linux/crash_dump.h>
@@ -102,20 +103,21 @@ struct its_node {
 	struct its_collection	*collections;
 	struct fwnode_handle	*fwnode_handle;
 	u64			(*get_msi_base)(struct its_device *its_dev);
+	u64			typer;
 	u64			cbaser_save;
 	u32			ctlr_save;
 	struct list_head	its_device_list;
 	u64			flags;
 	unsigned long		list_nr;
-	u32			ite_size;
-	u32			device_ids;
 	int			numa_node;
 	unsigned int		msi_domain_flags;
 	u32			pre_its_base;	/* for Socionext Synquacer */
-	bool			is_v4;
 	int			vlpi_redist_offset;
 };

+#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
+#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
 #define ITS_ITT_ALIGN		SZ_256

 /* The maximum number of VPEID bits supported by VLPI commands */
@@ -130,7 +132,7 @@ struct event_lpi_map {
 	u16			*col_map;
 	irq_hw_number_t		lpi_base;
 	int			nr_lpis;
-	struct mutex		vlpi_lock;
+	raw_spinlock_t		vlpi_lock;
 	struct its_vm		*vm;
 	struct its_vlpi_map	*vlpi_maps;
 	int			nr_vlpis;
@ -181,7 +183,7 @@ static u16 get_its_list(struct its_vm *vm)
unsigned long its_list = 0; unsigned long its_list = 0;
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4) if (!is_v4(its))
continue; continue;
if (vm->vlpi_count[its->list_nr]) if (vm->vlpi_count[its->list_nr])
@ -191,6 +193,12 @@ static u16 get_its_list(struct its_vm *vm)
return (u16)its_list; return (u16)its_list;
} }
static inline u32 its_get_event_id(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
return d->hwirq - its_dev->event_map.lpi_base;
}
static struct its_collection *dev_event_to_col(struct its_device *its_dev, static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event) u32 event)
{ {
@ -199,6 +207,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
return its->collections + its_dev->event_map.col_map[event]; return its->collections + its_dev->event_map.col_map[event];
} }
static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
u32 event)
{
if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
return NULL;
return &its_dev->event_map.vlpi_maps[event];
}
static struct its_collection *irq_to_col(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
return dev_event_to_col(its_dev, its_get_event_id(d));
}
static struct its_collection *valid_col(struct its_collection *col) static struct its_collection *valid_col(struct its_collection *col)
{ {
if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
@@ -305,7 +329,10 @@ struct its_cmd_desc {
  * The ITS command block, which is what the ITS actually parses.
  */
 struct its_cmd_block {
-	u64	raw_cmd[4];
+	union {
+		u64	raw_cmd[4];
+		__le64	raw_cmd_le[4];
+	};
 };

 #define ITS_CMD_QUEUE_SZ		SZ_64K
@@ -414,10 +441,10 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
-	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
-	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
-	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
-	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
+	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
+	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
+	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
 }
static struct its_collection *its_build_mapd_cmd(struct its_node *its, static struct its_collection *its_build_mapd_cmd(struct its_node *its,
@ -676,6 +703,60 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
return valid_vpe(its, desc->its_vmovp_cmd.vpe); return valid_vpe(its, desc->its_vmovp_cmd.vpe);
} }
static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_vlpi_map *map;
map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
desc->its_inv_cmd.event_id);
its_encode_cmd(cmd, GITS_CMD_INV);
its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
its_fixup_cmd(cmd);
return valid_vpe(its, map->vpe);
}
static struct its_vpe *its_build_vint_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_vlpi_map *map;
map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
desc->its_int_cmd.event_id);
its_encode_cmd(cmd, GITS_CMD_INT);
its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_int_cmd.event_id);
its_fixup_cmd(cmd);
return valid_vpe(its, map->vpe);
}
static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_vlpi_map *map;
map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
desc->its_clear_cmd.event_id);
its_encode_cmd(cmd, GITS_CMD_CLEAR);
its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
its_fixup_cmd(cmd);
return valid_vpe(its, map->vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its, static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr) struct its_cmd_block *ptr)
{ {
@@ -953,7 +1034,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)

 static void its_send_vmapti(struct its_device *dev, u32 id)
 {
-	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
 	struct its_cmd_desc desc;

 	desc.its_vmapti_cmd.vpe = map->vpe;
@@ -967,7 +1048,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id)

 static void its_send_vmovi(struct its_device *dev, u32 id)
 {
-	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
 	struct its_cmd_desc desc;

 	desc.its_vmovi_cmd.vpe = map->vpe;
@ -1021,7 +1102,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
/* Emit VMOVPs */ /* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4) if (!is_v4(its))
continue; continue;
if (!vpe->its_vm->vlpi_count[its->list_nr]) if (!vpe->its_vm->vlpi_count[its->list_nr])
@ -1042,29 +1123,71 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
} }
static void its_send_vinv(struct its_device *dev, u32 event_id)
{
struct its_cmd_desc desc;
/*
* There is no real VINV command. This is just a normal INV,
* with a VSYNC instead of a SYNC.
*/
desc.its_inv_cmd.dev = dev;
desc.its_inv_cmd.event_id = event_id;
its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}
static void its_send_vint(struct its_device *dev, u32 event_id)
{
struct its_cmd_desc desc;
/*
* There is no real VINT command. This is just a normal INT,
* with a VSYNC instead of a SYNC.
*/
desc.its_int_cmd.dev = dev;
desc.its_int_cmd.event_id = event_id;
its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}
static void its_send_vclear(struct its_device *dev, u32 event_id)
{
struct its_cmd_desc desc;
/*
* There is no real VCLEAR command. This is just a normal CLEAR,
* with a VSYNC instead of a SYNC.
*/
desc.its_clear_cmd.dev = dev;
desc.its_clear_cmd.event_id = event_id;
its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}
 /*
  * irqchip functions - assumes MSI, mostly.
  */

-static inline u32 its_get_event_id(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-	return d->hwirq - its_dev->event_map.lpi_base;
+	u32 event = its_get_event_id(d);
+
+	if (!irqd_is_forwarded_to_vcpu(d))
+		return NULL;
+
+	return dev_event_to_vlpi_map(its_dev, event);
 }

 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 {
+	struct its_vlpi_map *map = get_vlpi_map(d);
 	irq_hw_number_t hwirq;
 	void *va;
 	u8 *cfg;

-	if (irqd_is_forwarded_to_vcpu(d)) {
-		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-		u32 event = its_get_event_id(d);
-		struct its_vlpi_map *map;
-
-		va = page_address(its_dev->event_map.vm->vprop_page);
-		map = &its_dev->event_map.vlpi_maps[event];
+	if (map) {
+		va = page_address(map->vm->vprop_page);
 		hwirq = map->vintid;

 		/* Remember the updated property */
@@ -1090,23 +1213,50 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
 	dsb(ishst);
 }
static void wait_for_syncr(void __iomem *rdbase)
{
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
cpu_relax();
}
static void direct_lpi_inv(struct irq_data *d)
{
struct its_collection *col;
void __iomem *rdbase;
/* Target the redistributor this LPI is currently routed to */
col = irq_to_col(d);
rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
wait_for_syncr(rdbase);
}
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

 	lpi_write_config(d, clr, set);
-	its_send_inv(its_dev, its_get_event_id(d));
+	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+		direct_lpi_inv(d);
+	else if (!irqd_is_forwarded_to_vcpu(d))
+		its_send_inv(its_dev, its_get_event_id(d));
+	else
+		its_send_vinv(its_dev, its_get_event_id(d));
 }
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{ {
struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d); u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) map = dev_event_to_vlpi_map(its_dev, event);
if (map->db_enabled == enable)
return; return;
its_dev->event_map.vlpi_maps[event].db_enabled = enable; map->db_enabled = enable;
/* /*
* More fun with the architecture: * More fun with the architecture:
@ -1208,10 +1358,17 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
if (which != IRQCHIP_STATE_PENDING) if (which != IRQCHIP_STATE_PENDING)
return -EINVAL; return -EINVAL;
if (irqd_is_forwarded_to_vcpu(d)) {
if (state)
its_send_vint(its_dev, event);
else
its_send_vclear(its_dev, event);
} else {
if (state) if (state)
its_send_int(its_dev, event); its_send_int(its_dev, event);
else else
its_send_clear(its_dev, event); its_send_clear(its_dev, event);
}
return 0; return 0;
} }
@ -1279,13 +1436,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
if (!info->map) if (!info->map)
return -EINVAL; return -EINVAL;
mutex_lock(&its_dev->event_map.vlpi_lock); raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm) { if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps; struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_KERNEL); GFP_ATOMIC);
if (!maps) { if (!maps) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
@ -1328,29 +1485,30 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
} }
out: out:
mutex_unlock(&its_dev->event_map.vlpi_lock); raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret; return ret;
} }
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{ {
struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d); struct its_vlpi_map *map;
int ret = 0; int ret = 0;
mutex_lock(&its_dev->event_map.vlpi_lock); raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || map = get_vlpi_map(d);
!its_dev->event_map.vlpi_maps[event].vm) {
if (!its_dev->event_map.vm || !map) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
/* Copy our mapping information to the incoming request */ /* Copy our mapping information to the incoming request */
*info->map = its_dev->event_map.vlpi_maps[event]; *info->map = *map;
out: out:
mutex_unlock(&its_dev->event_map.vlpi_lock); raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret; return ret;
} }
@ -1360,7 +1518,7 @@ static int its_vlpi_unmap(struct irq_data *d)
u32 event = its_get_event_id(d); u32 event = its_get_event_id(d);
int ret = 0; int ret = 0;
mutex_lock(&its_dev->event_map.vlpi_lock); raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
ret = -EINVAL; ret = -EINVAL;
@ -1390,7 +1548,7 @@ static int its_vlpi_unmap(struct irq_data *d)
} }
out: out:
mutex_unlock(&its_dev->event_map.vlpi_lock); raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret; return ret;
} }
@ -1416,7 +1574,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
struct its_cmd_info *info = vcpu_info; struct its_cmd_info *info = vcpu_info;
/* Need a v4 ITS */ /* Need a v4 ITS */
if (!its_dev->its->is_v4) if (!is_v4(its_dev->its))
return -EINVAL; return -EINVAL;
/* Unmap request? */ /* Unmap request? */
@ -1922,9 +2080,9 @@ static bool its_parse_indirect_baser(struct its_node *its,
if (new_order >= MAX_ORDER) { if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1; new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type], &its->phys_base, its_base_type_string[type],
its->device_ids, ids); device_ids(its), ids);
} }
*order = new_order; *order = new_order;
@ -1970,7 +2128,7 @@ static int its_alloc_tables(struct its_node *its)
case GITS_BASER_TYPE_DEVICE: case GITS_BASER_TYPE_DEVICE:
indirect = its_parse_indirect_baser(its, baser, indirect = its_parse_indirect_baser(its, baser,
psz, &order, psz, &order,
its->device_ids); device_ids(its));
break; break;
case GITS_BASER_TYPE_VCPU: case GITS_BASER_TYPE_VCPU:
@ -2361,7 +2519,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Don't allow device id that exceeds ITS hardware limit */ /* Don't allow device id that exceeds ITS hardware limit */
if (!baser) if (!baser)
return (ilog2(dev_id) < its->device_ids); return (ilog2(dev_id) < device_ids(its));
return its_alloc_table_entry(its, baser, dev_id); return its_alloc_table_entry(its, baser, dev_id);
} }
@ -2380,7 +2538,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
struct its_baser *baser; struct its_baser *baser;
if (!its->is_v4) if (!is_v4(its))
continue; continue;
baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
@ -2419,7 +2577,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* sized as a power of two (and you need at least one bit...). * sized as a power of two (and you need at least one bit...).
*/ */
nr_ites = max(2, nvecs); nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size; sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) { if (alloc_lpis) {
@ -2450,7 +2608,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map; dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base; dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis; dev->event_map.nr_lpis = nr_lpis;
mutex_init(&dev->event_map.vlpi_lock); raw_spin_lock_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id; dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry); INIT_LIST_HEAD(&dev->entry);
@ -2471,6 +2629,7 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags); raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry); list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
kfree(its_dev->itt); kfree(its_dev->itt);
kfree(its_dev); kfree(its_dev);
} }
@ -2679,7 +2838,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_lpi_free(its_dev->event_map.lpi_map, its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base, its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis); its_dev->event_map.nr_lpis);
kfree(its_dev->event_map.col_map);
/* Unmap device/itt */ /* Unmap device/itt */
its_send_mapd(its_dev, 0); its_send_mapd(its_dev, 0);
@ -2772,8 +2930,7 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) wait_for_syncr(rdbase);
cpu_relax();
return; return;
} }
@ -2869,7 +3026,7 @@ static void its_vpe_invall(struct its_vpe *vpe)
struct its_node *its; struct its_node *its;
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4) if (!is_v4(its))
continue; continue;
if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
@ -2927,10 +3084,10 @@ static void its_vpe_send_inv(struct irq_data *d)
if (gic_rdists->has_direct_lpi) { if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase; void __iomem *rdbase;
/* Target the redistributor this VPE is currently known on */
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) wait_for_syncr(rdbase);
cpu_relax();
} else { } else {
its_vpe_send_cmd(vpe, its_send_inv); its_vpe_send_cmd(vpe, its_send_inv);
} }
@ -2972,8 +3129,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else { } else {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) wait_for_syncr(rdbase);
cpu_relax();
} }
} else { } else {
if (state) if (state)
@ -3138,7 +3294,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
vpe->col_idx = cpumask_first(cpu_online_mask); vpe->col_idx = cpumask_first(cpu_online_mask);
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4) if (!is_v4(its))
continue; continue;
its_send_vmapp(its, vpe, true); its_send_vmapp(its, vpe, true);
@ -3164,7 +3320,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
return; return;
list_for_each_entry(its, &its_nodes, entry) { list_for_each_entry(its, &its_nodes, entry) {
if (!its->is_v4) if (!is_v4(its))
continue; continue;
its_send_vmapp(its, vpe, false); its_send_vmapp(its, vpe, false);
@ -3215,8 +3371,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{ {
struct its_node *its = data; struct its_node *its = data;
/* erratum 22375: only alloc 8MB table size */ /* erratum 22375: only alloc 8MB table size (20 bits) */
its->device_ids = 0x14; /* 20 bits, 8MB */ its->typer &= ~GITS_TYPER_DEVBITS;
its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
return true; return true;
@ -3236,7 +3393,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
struct its_node *its = data; struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */ /* On QDF2400, the size of the ITE is 16Bytes */
its->ite_size = 16; its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
return true; return true;
} }
@ -3270,8 +3428,10 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
its->get_msi_base = its_irq_get_msi_base_pre_its; its->get_msi_base = its_irq_get_msi_base_pre_its;
ids = ilog2(pre_its_window[1]) - 2; ids = ilog2(pre_its_window[1]) - 2;
if (its->device_ids > ids) if (device_ids(its) > ids) {
its->device_ids = ids; its->typer &= ~GITS_TYPER_DEVBITS;
its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
}
/* the pre-ITS breaks isolation, so disable MSI remapping */ /* the pre-ITS breaks isolation, so disable MSI remapping */
its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
@ -3504,7 +3664,7 @@ static int its_init_vpe_domain(void)
} }
/* Use the last possible DevID */ /* Use the last possible DevID */
devid = GENMASK(its->device_ids - 1, 0); devid = GENMASK(device_ids(its) - 1, 0);
vpe_proxy.dev = its_create_device(its, devid, entries, false); vpe_proxy.dev = its_create_device(its, devid, entries, false);
if (!vpe_proxy.dev) { if (!vpe_proxy.dev) {
kfree(vpe_proxy.vpes); kfree(vpe_proxy.vpes);
@ -3602,12 +3762,10 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->entry); INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list); INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER); typer = gic_read_typer(its_base + GITS_TYPER);
its->typer = typer;
its->base = its_base; its->base = its_base;
its->phys_base = res->start; its->phys_base = res->start;
its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); if (is_v4(its)) {
its->device_ids = GITS_TYPER_DEVBITS(typer);
its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
if (its->is_v4) {
if (!(typer & GITS_TYPER_VMOVP)) { if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base); err = its_compute_its_list_map(res, its_base);
if (err < 0) if (err < 0)
@ -3674,7 +3832,7 @@ static int __init its_probe_one(struct resource *res,
gits_write_cwriter(0, its->base + GITS_CWRITER); gits_write_cwriter(0, its->base + GITS_CWRITER);
ctlr = readl_relaxed(its->base + GITS_CTLR); ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr |= GITS_CTLR_ENABLE; ctlr |= GITS_CTLR_ENABLE;
if (its->is_v4) if (is_v4(its))
ctlr |= GITS_CTLR_ImDe; ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR); writel_relaxed(ctlr, its->base + GITS_CTLR);
@ -3999,7 +4157,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return err; return err;
list_for_each_entry(its, &its_nodes, entry) list_for_each_entry(its, &its_nodes, entry)
has_v4 |= its->is_v4; has_v4 |= is_v4(its);
if (has_v4 & rdists->has_vlpis) { if (has_v4 & rdists->has_vlpis) {
if (its_init_vpe_domain() || if (its_init_vpe_domain() ||


@@ -174,7 +174,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
 		}
 		cpu_relax();
 		udelay(1);
-	};
+	}
 }

 /* Wait for completion of a distributor change */
@@ -231,7 +231,7 @@ static void gic_enable_redist(bool enable)
 			break;
 		cpu_relax();
 		udelay(1);
-	};
+	}

 	if (!count)
 		pr_err_ratelimited("redistributor failed to %s...\n",
 				   enable ? "wakeup" : "sleep");


@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- *  JZ4740 platform IRQ support
+ *  Ingenic XBurst platform IRQ support
  */

 #include <linux/errno.h>
@@ -10,7 +10,6 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/irqchip.h>
-#include <linux/irqchip/ingenic.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/timex.h>
@@ -22,6 +21,7 @@
 struct ingenic_intc_data {
 	void __iomem *base;
+	struct irq_domain *domain;
 	unsigned num_chips;
 };
@ -35,41 +35,30 @@ struct ingenic_intc_data {
static irqreturn_t intc_cascade(int irq, void *data) static irqreturn_t intc_cascade(int irq, void *data)
{ {
struct ingenic_intc_data *intc = irq_get_handler_data(irq); struct ingenic_intc_data *intc = irq_get_handler_data(irq);
uint32_t irq_reg; struct irq_domain *domain = intc->domain;
struct irq_chip_generic *gc;
uint32_t pending;
unsigned i; unsigned i;
for (i = 0; i < intc->num_chips; i++) { for (i = 0; i < intc->num_chips; i++) {
irq_reg = readl(intc->base + (i * CHIP_SIZE) + gc = irq_get_domain_generic_chip(domain, i * 32);
JZ_REG_INTC_PENDING);
if (!irq_reg) pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING);
if (!pending)
continue; continue;
generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE); while (pending) {
int bit = __fls(pending);
irq = irq_find_mapping(domain, bit + (i * 32));
generic_handle_irq(irq);
pending &= ~BIT(bit);
}
} }
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
{
struct irq_chip_regs *regs = &gc->chip_types->regs;
writel(mask, gc->reg_base + regs->enable);
writel(~mask, gc->reg_base + regs->disable);
}
void ingenic_intc_irq_suspend(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
intc_irq_set_mask(gc, gc->wake_active);
}
void ingenic_intc_irq_resume(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
intc_irq_set_mask(gc, gc->mask_cache);
}
static struct irqaction intc_cascade_action = { static struct irqaction intc_cascade_action = {
.handler = intc_cascade, .handler = intc_cascade,
.name = "SoC intc cascade interrupt", .name = "SoC intc cascade interrupt",
@ -108,17 +97,27 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq; goto out_unmap_irq;
} }
for (i = 0; i < num_chips; i++) { domain = irq_domain_add_legacy(node, num_chips * 32,
/* Mask all irqs */ JZ4740_IRQ_BASE, 0,
writel(0xffffffff, intc->base + (i * CHIP_SIZE) + &irq_generic_chip_ops, NULL);
JZ_REG_INTC_SET_MASK); if (!domain) {
err = -ENOMEM;
goto out_unmap_base;
}
gc = irq_alloc_generic_chip("INTC", 1, intc->domain = domain;
JZ4740_IRQ_BASE + (i * 32),
intc->base + (i * CHIP_SIZE), err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC",
handle_level_irq); handle_level_irq, 0,
IRQ_NOPROBE | IRQ_LEVEL, 0);
if (err)
goto out_domain_remove;
for (i = 0; i < num_chips; i++) {
gc = irq_get_domain_generic_chip(domain, i * 32);
gc->wake_enabled = IRQ_MSK(32); gc->wake_enabled = IRQ_MSK(32);
gc->reg_base = intc->base + (i * CHIP_SIZE);
ct = gc->chip_types; ct = gc->chip_types;
ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
@ -127,21 +126,19 @@ static int __init ingenic_intc_of_init(struct device_node *node,
ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake; ct->chip.irq_set_wake = irq_gc_set_wake;
ct->chip.irq_suspend = ingenic_intc_irq_suspend; ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
ct->chip.irq_resume = ingenic_intc_irq_resume;
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, /* Mask all irqs */
IRQ_NOPROBE | IRQ_LEVEL); irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
} }
domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
&irq_domain_simple_ops, NULL);
if (!domain)
pr_warn("unable to register IRQ domain\n");
setup_irq(parent_irq, &intc_cascade_action); setup_irq(parent_irq, &intc_cascade_action);
return 0; return 0;
out_domain_remove:
irq_domain_remove(domain);
out_unmap_base:
iounmap(intc->base);
out_unmap_irq: out_unmap_irq:
irq_dispose_mapping(parent_irq); irq_dispose_mapping(parent_irq);
out_free: out_free:


@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "irq-ls-extirq: " fmt
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#define MAXIRQ 12
#define LS1021A_SCFGREVCR 0x200
struct ls_extirq_data {
struct regmap *syscon;
u32 intpcr;
bool bit_reverse;
u32 nirq;
struct irq_fwspec map[MAXIRQ];
};
static int
ls_extirq_set_type(struct irq_data *data, unsigned int type)
{
struct ls_extirq_data *priv = data->chip_data;
irq_hw_number_t hwirq = data->hwirq;
u32 value, mask;
if (priv->bit_reverse)
mask = 1U << (31 - hwirq);
else
mask = 1U << hwirq;
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
type = IRQ_TYPE_LEVEL_HIGH;
value = mask;
break;
case IRQ_TYPE_EDGE_FALLING:
type = IRQ_TYPE_EDGE_RISING;
value = mask;
break;
case IRQ_TYPE_LEVEL_HIGH:
case IRQ_TYPE_EDGE_RISING:
value = 0;
break;
default:
return -EINVAL;
}
regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
return irq_chip_set_type_parent(data, type);
}
static struct irq_chip ls_extirq_chip = {
.name = "ls-extirq",
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_type = ls_extirq_set_type,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = irq_chip_set_affinity_parent,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int
ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct ls_extirq_data *priv = domain->host_data;
struct irq_fwspec *fwspec = arg;
irq_hw_number_t hwirq;
if (fwspec->param_count != 2)
return -EINVAL;
hwirq = fwspec->param[0];
if (hwirq >= priv->nirq)
return -EINVAL;
irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip,
priv);
return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
}
static const struct irq_domain_ops extirq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
.alloc = ls_extirq_domain_alloc,
.free = irq_domain_free_irqs_common,
};
static int
ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
{
const __be32 *map;
u32 mapsize;
int ret;
map = of_get_property(node, "interrupt-map", &mapsize);
if (!map)
return -ENOENT;
if (mapsize % sizeof(*map))
return -EINVAL;
mapsize /= sizeof(*map);
while (mapsize) {
struct device_node *ipar;
u32 hwirq, intsize, j;
if (mapsize < 3)
return -EINVAL;
hwirq = be32_to_cpup(map);
if (hwirq >= MAXIRQ)
return -EINVAL;
priv->nirq = max(priv->nirq, hwirq + 1);
ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
map += 3;
mapsize -= 3;
if (!ipar)
return -EINVAL;
priv->map[hwirq].fwnode = &ipar->fwnode;
ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
if (ret)
return ret;
if (intsize > mapsize)
return -EINVAL;
priv->map[hwirq].param_count = intsize;
for (j = 0; j < intsize; ++j)
priv->map[hwirq].param[j] = be32_to_cpup(map++);
mapsize -= intsize;
}
return 0;
}
static int __init
ls_extirq_of_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain, *parent_domain;
struct ls_extirq_data *priv;
int ret;
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("Cannot find parent domain\n");
return -ENODEV;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->syscon = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->syscon)) {
ret = PTR_ERR(priv->syscon);
pr_err("Failed to lookup parent regmap\n");
goto out;
}
ret = of_property_read_u32(node, "reg", &priv->intpcr);
if (ret) {
pr_err("Missing INTPCR offset value\n");
goto out;
}
ret = ls_extirq_parse_map(priv, node);
if (ret)
goto out;
if (of_device_is_compatible(node, "fsl,ls1021a-extirq")) {
u32 revcr;
ret = regmap_read(priv->syscon, LS1021A_SCFGREVCR, &revcr);
if (ret)
goto out;
priv->bit_reverse = (revcr != 0);
}
domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
&extirq_domain_ops, priv);
if (!domain)
ret = -ENOMEM;
out:
if (ret)
kfree(priv);
return ret;
}
IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);


@@ -246,8 +246,8 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
 	/* No free bits available. Allocate a new vint */
 	vint_desc = ti_sci_inta_alloc_parent_irq(domain);
 	if (IS_ERR(vint_desc)) {
-		mutex_unlock(&inta->vint_mutex);
-		return ERR_PTR(PTR_ERR(vint_desc));
+		event_desc = ERR_CAST(vint_desc);
+		goto unlock;
 	}

 	free_bit = find_first_zero_bit(vint_desc->event_map,
@@ -259,6 +259,7 @@ alloc_event:
 	if (IS_ERR(event_desc))
 		clear_bit(free_bit, vint_desc->event_map);

+unlock:
 	mutex_unlock(&inta->vint_mutex);
 	return event_desc;
 }


@@ -51,7 +51,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
 	while (readl(zevio_irq_io + IO_STATUS)) {
 		irqnr = readl(zevio_irq_io + IO_CURRENT);
 		handle_domain_irq(zevio_irq_domain, irqnr, regs);
-	};
+	}
 }

 static void __init zevio_init_irq_base(void __iomem *base)


@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */

 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
@@ -13,12 +14,13 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
 #include <linux/spinlock.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>

-#define PDC_MAX_IRQS		126
+#define PDC_MAX_IRQS		168
+#define PDC_MAX_GPIO_IRQS	256

 #define CLEAR_INTR(reg, intr)	(reg & ~(1 << intr))
 #define ENABLE_INTR(reg, intr)	(reg | (1 << intr))
@@ -26,6 +28,8 @@
 #define IRQ_ENABLE_BANK		0x10
 #define IRQ_i_CFG		0x110

+#define PDC_NO_PARENT_IRQ	~0UL
+
 struct pdc_pin_region {
 	u32 pin_base;
 	u32 parent_base;
@ -47,6 +51,26 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32)); return readl_relaxed(pdc_base + reg + i * sizeof(u32));
} }
static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
if (d->hwirq == GPIO_NO_WAKE_IRQ)
return 0;
return irq_chip_get_parent_state(d, which, state);
}
static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool value)
{
if (d->hwirq == GPIO_NO_WAKE_IRQ)
return 0;
return irq_chip_set_parent_state(d, which, value);
}
static void pdc_enable_intr(struct irq_data *d, bool on) static void pdc_enable_intr(struct irq_data *d, bool on)
{ {
int pin_out = d->hwirq; int pin_out = d->hwirq;
@ -63,15 +87,37 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
raw_spin_unlock(&pdc_lock); raw_spin_unlock(&pdc_lock);
} }
static void qcom_pdc_gic_disable(struct irq_data *d)
{
if (d->hwirq == GPIO_NO_WAKE_IRQ)
return;
pdc_enable_intr(d, false);
irq_chip_disable_parent(d);
}
static void qcom_pdc_gic_enable(struct irq_data *d)
{
if (d->hwirq == GPIO_NO_WAKE_IRQ)
return;
pdc_enable_intr(d, true);
irq_chip_enable_parent(d);
}
 static void qcom_pdc_gic_mask(struct irq_data *d)
 {
-	pdc_enable_intr(d, false);
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
 	irq_chip_mask_parent(d);
 }

 static void qcom_pdc_gic_unmask(struct irq_data *d)
 {
-	pdc_enable_intr(d, true);
+	if (d->hwirq == GPIO_NO_WAKE_IRQ)
+		return;
+
 	irq_chip_unmask_parent(d);
 }
@ -114,6 +160,9 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
int pin_out = d->hwirq; int pin_out = d->hwirq;
enum pdc_irq_config_bits pdc_type; enum pdc_irq_config_bits pdc_type;
if (pin_out == GPIO_NO_WAKE_IRQ)
return 0;
switch (type) { switch (type) {
case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_EDGE_RISING:
pdc_type = PDC_EDGE_RISING; pdc_type = PDC_EDGE_RISING;
@ -148,6 +197,10 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_eoi = irq_chip_eoi_parent, .irq_eoi = irq_chip_eoi_parent,
.irq_mask = qcom_pdc_gic_mask, .irq_mask = qcom_pdc_gic_mask,
.irq_unmask = qcom_pdc_gic_unmask, .irq_unmask = qcom_pdc_gic_unmask,
.irq_disable = qcom_pdc_gic_disable,
.irq_enable = qcom_pdc_gic_enable,
.irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state,
.irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state,
.irq_retrigger = irq_chip_retrigger_hierarchy, .irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = qcom_pdc_gic_set_type, .irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND | .flags = IRQCHIP_MASK_ON_SUSPEND |
@@ -169,8 +222,7 @@ static irq_hw_number_t get_parent_hwirq(int pin)
 			return (region->parent_base + pin - region->pin_base);
 	}

-	WARN_ON(1);
-	return ~0UL;
+	return PDC_NO_PARENT_IRQ;
 }

 static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
@@ -199,17 +251,17 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,

 	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
 	if (ret)
-		return -EINVAL;
-
-	parent_hwirq = get_parent_hwirq(hwirq);
-	if (parent_hwirq == ~0UL)
-		return -EINVAL;
+		return ret;

 	ret  = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
 					     &qcom_pdc_gic_chip, NULL);
 	if (ret)
 		return ret;

+	parent_hwirq = get_parent_hwirq(hwirq);
+	if (parent_hwirq == PDC_NO_PARENT_IRQ)
+		return 0;
+
 	if (type & IRQ_TYPE_EDGE_BOTH)
 		type = IRQ_TYPE_EDGE_RISING;
@ -232,6 +284,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
.free = irq_domain_free_irqs_common, .free = irq_domain_free_irqs_common,
}; };
static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *data)
{
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq, parent_hwirq;
unsigned int type;
int ret;
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
if (ret)
return ret;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&qcom_pdc_gic_chip, NULL);
if (ret)
return ret;
if (hwirq == GPIO_NO_WAKE_IRQ)
return 0;
parent_hwirq = get_parent_hwirq(hwirq);
if (parent_hwirq == PDC_NO_PARENT_IRQ)
return 0;
if (type & IRQ_TYPE_EDGE_BOTH)
type = IRQ_TYPE_EDGE_RISING;
if (type & IRQ_TYPE_LEVEL_MASK)
type = IRQ_TYPE_LEVEL_HIGH;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0;
parent_fwspec.param[1] = parent_hwirq;
parent_fwspec.param[2] = type;
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
&parent_fwspec);
}
static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
return bus_token == DOMAIN_BUS_WAKEUP;
}
static const struct irq_domain_ops qcom_pdc_gpio_ops = {
.select = qcom_pdc_gpio_domain_select,
.alloc = qcom_pdc_gpio_alloc,
.free = irq_domain_free_irqs_common,
};
static int pdc_setup_pin_mapping(struct device_node *np) static int pdc_setup_pin_mapping(struct device_node *np)
{ {
int ret, n; int ret, n;
@ -270,7 +376,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
-	struct irq_domain *parent_domain, *pdc_domain;
+	struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
 	int ret;

 	pdc_base = of_iomap(node, 0);
@ -301,12 +407,27 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail; goto fail;
} }
pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
PDC_MAX_GPIO_IRQS,
of_fwnode_handle(node),
&qcom_pdc_gpio_ops, NULL);
if (!pdc_gpio_domain) {
pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
ret = -ENOMEM;
goto remove;
}
irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
return 0;
remove:
irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
return ret;
}
- IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
+ IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
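
Because qcom_pdc_init() now registers two hierarchical domains on the same PDC fwnode, a client driver cannot look the wakeup domain up by fwnode alone; it has to match on the DOMAIN_BUS_WAKEUP token that qcom_pdc_init() attaches to the second domain via irq_domain_update_bus_token(). A minimal sketch of that lookup (illustrative only, not part of this patch; the pinctrl-msm hunks below open-code the same steps):

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Resolve a "wakeup-parent" phandle to the PDC's wakeup (GPIO) domain. */
static struct irq_domain *example_find_wakeup_parent(struct device_node *np)
{
	struct device_node *parent_np = of_parse_phandle(np, "wakeup-parent", 0);
	struct irq_domain *domain;

	if (!parent_np)
		return NULL;

	/* Only the domain registered with DOMAIN_BUS_WAKEUP matches here. */
	domain = irq_find_matching_host(parent_np, DOMAIN_BUS_WAKEUP);
	of_node_put(parent_np);

	return domain;
}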


@@ -23,6 +23,8 @@
#include <linux/pm.h>
#include <linux/log2.h>
#include <linux/soc/qcom/irq.h>
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
@@ -44,6 +46,7 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
* @soc; Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
*/
@@ -61,6 +64,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -707,6 +711,12 @@ static void msm_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
u32 val;
if (d->parent_data)
irq_chip_mask_parent(d);
if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
return;
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -751,6 +761,12 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
unsigned long flags;
u32 val;
if (d->parent_data)
irq_chip_unmask_parent(d);
if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
return;
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -778,10 +794,35 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
/*
* Clear the interrupt that may be pending before we enable
* the line.
* This is especially a problem with the GPIOs routed to the
* PDC. These GPIOs are direct-connect interrupts to the GIC.
* Disabling the interrupt line at the PDC does not prevent
* the interrupt from being latched at the GIC. The state at
* GIC needs to be cleared before enabling.
*/
if (d->parent_data) {
irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
irq_chip_enable_parent(d);
}
msm_gpio_irq_clear_unmask(d, true);
}
static void msm_gpio_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
if (d->parent_data)
irq_chip_disable_parent(d);
if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
msm_gpio_irq_mask(d);
}
static void msm_gpio_irq_unmask(struct irq_data *d)
{
msm_gpio_irq_clear_unmask(d, false);
@@ -795,6 +836,9 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
return;
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -820,6 +864,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
if (d->parent_data)
irq_chip_set_type_parent(d, type);
if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
return 0;
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -912,6 +962,15 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
/*
* While they may not wake up when the TLMM is powered off,
* some GPIOs would like to wakeup the system from suspend
* when TLMM is powered on. To allow that, enable the GPIO
* summary line to be wakeup capable at GIC.
*/
if (d->parent_data)
irq_chip_set_wake_parent(d, on);
raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
@@ -990,6 +1049,30 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
static int msm_gpio_wakeirq(struct gpio_chip *gc,
unsigned int child,
unsigned int child_type,
unsigned int *parent,
unsigned int *parent_type)
{
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_gpio_wakeirq_map *map;
int i;
*parent = GPIO_NO_WAKE_IRQ;
*parent_type = IRQ_TYPE_EDGE_RISING;
for (i = 0; i < pctrl->soc->nwakeirq_map; i++) {
map = &pctrl->soc->wakeirq_map[i];
if (map->gpio == child) {
*parent = map->wakeirq;
break;
}
}
return 0;
}
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
if (pctrl->soc->reserved_gpios)
@@ -1002,8 +1085,10 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- int ret;
+ int i, ret;
- unsigned ngpio = pctrl->soc->ngpios;
+ unsigned gpio, ngpio = pctrl->soc->ngpios;
struct device_node *np;
bool skip;
if (WARN_ON(ngpio > MAX_NR_GPIO))
return -EINVAL;
@@ -1020,17 +1105,40 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
chip->irq.parent_domain = irq_find_matching_host(np,
DOMAIN_BUS_WAKEUP);
of_node_put(np);
if (!chip->irq.parent_domain)
return -EPROBE_DEFER;
chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
/*
* Let's skip handling the GPIOs, if the parent irqchip
* is handling the direct connect IRQ of the GPIO.
*/
skip = irq_domain_qcom_handle_wakeup(chip->irq.parent_domain);
for (i = 0; skip && i < pctrl->soc->nwakeirq_map; i++) {
gpio = pctrl->soc->wakeirq_map[i].gpio;
set_bit(gpio, pctrl->skip_wake_irqs);
}
}
girq = &chip->irq;
girq->chip = &pctrl->irq_chip;
girq->parent_handler = msm_gpio_irq_handler;
girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);


@@ -91,6 +91,16 @@ struct msm_pingroup {
unsigned intr_detection_width:5;
};
/**
* struct msm_gpio_wakeirq_map - Map of GPIOs and their wakeup pins
* @gpio: The GPIOs that are wakeup capable
* @wakeirq: The interrupt at the always-on interrupt controller
*/
struct msm_gpio_wakeirq_map {
unsigned int gpio;
unsigned int wakeirq;
};
/**
* struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
* @pins: An array describing all pins the pin controller affects.
@@ -101,6 +111,8 @@ struct msm_pingroup {
* @ngroups: The numbmer of entries in @groups.
* @ngpio: The number of pingroups the driver should expose as GPIOs.
* @pull_no_keeper: The SoC does not support keeper bias.
* @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM
* @nwakeirq_map: The number of entries in @wakeirq_map
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -114,6 +126,8 @@ struct msm_pinctrl_soc_data {
const char *const *tiles;
unsigned int ntiles;
const int *reserved_gpios;
const struct msm_gpio_wakeirq_map *wakeirq_map;
unsigned int nwakeirq_map;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
@@ -1282,6 +1282,24 @@ static const int sdm845_acpi_reserved_gpios[] = {
0, 1, 2, 3, 81, 82, 83, 84, -1
};
static const struct msm_gpio_wakeirq_map sdm845_pdc_map[] = {
{ 1, 30 }, { 3, 31 }, { 5, 32 }, { 10, 33 }, { 11, 34 },
{ 20, 35 }, { 22, 36 }, { 24, 37 }, { 26, 38 }, { 30, 39 },
{ 31, 117 }, { 32, 41 }, { 34, 42 }, { 36, 43 }, { 37, 44 },
{ 38, 45 }, { 39, 46 }, { 40, 47 }, { 41, 115 }, { 43, 49 },
{ 44, 50 }, { 46, 51 }, { 48, 52 }, { 49, 118 }, { 52, 54 },
{ 53, 55 }, { 54, 56 }, { 56, 57 }, { 57, 58 }, { 58, 59 },
{ 59, 60 }, { 60, 61 }, { 61, 62 }, { 62, 63 }, { 63, 64 },
{ 64, 65 }, { 66, 66 }, { 68, 67 }, { 71, 68 }, { 73, 69 },
{ 77, 70 }, { 78, 71 }, { 79, 72 }, { 80, 73 }, { 84, 74 },
{ 85, 75 }, { 86, 76 }, { 88, 77 }, { 89, 116 }, { 91, 79 },
{ 92, 80 }, { 95, 81 }, { 96, 82 }, { 97, 83 }, { 101, 84 },
{ 103, 85 }, { 104, 86 }, { 115, 90 }, { 116, 91 }, { 117, 92 },
{ 118, 93 }, { 119, 94 }, { 120, 95 }, { 121, 96 }, { 122, 97 },
{ 123, 98 }, { 124, 99 }, { 125, 100 }, { 127, 102 }, { 128, 103 },
{ 129, 104 }, { 130, 105 }, { 132, 106 }, { 133, 107 }, { 145, 108 },
};
static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.pins = sdm845_pins,
.npins = ARRAY_SIZE(sdm845_pins),
@@ -1290,6 +1308,9 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.groups = sdm845_groups,
.ngroups = ARRAY_SIZE(sdm845_groups),
.ngpios = 151,
.wakeirq_map = sdm845_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
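
The table above is consumed by msm_gpio_wakeirq() as a plain linear scan, with GPIO_NO_WAKE_IRQ (~0U) reported for pins that have no dedicated PDC line. A self-contained user-space sketch of that lookup, reusing two entries copied from sdm845_pdc_map and simplified stand-in types (illustrative only, not kernel code):

#include <stdio.h>

#define GPIO_NO_WAKE_IRQ (~0U)

struct msm_gpio_wakeirq_map { unsigned int gpio, wakeirq; };

/* Two entries copied from sdm845_pdc_map above, for illustration. */
static const struct msm_gpio_wakeirq_map example_map[] = {
	{ 1, 30 }, { 22, 36 },
};

/* Same linear scan as msm_gpio_wakeirq(): unmapped GPIOs get no wake IRQ. */
static unsigned int gpio_to_wakeirq(unsigned int gpio)
{
	unsigned int i;

	for (i = 0; i < sizeof(example_map) / sizeof(example_map[0]); i++)
		if (example_map[i].gpio == gpio)
			return example_map[i].wakeirq;
	return GPIO_NO_WAKE_IRQ;
}

int main(void)
{
	printf("GPIO 22 -> PDC pin %u\n", gpio_to_wakeirq(22));	/* 36 */
	printf("GPIO 2  -> %s\n",
	       gpio_to_wakeirq(2) == GPIO_NO_WAKE_IRQ ? "no wake IRQ" : "?");
	return 0;
}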


@@ -610,6 +610,12 @@ extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern int irq_chip_set_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool val);
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);


@@ -334,10 +334,10 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
- #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
+ #define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
- #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+ #define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HCC_SHIFT 24
#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
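
With plain GENMASK_ULL() masks the ITS code can extract these fields through FIELD_GET(); since the old function-like macros added 1 internally, that adjustment now has to happen at the call sites. A self-contained sketch of the equivalence, using user-space stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() helpers and a made-up register value (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* User-space stand-ins for the kernel macros; __builtin_ctzll is GCC/Clang. */
#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctzll(mask))

#define GITS_TYPER_ITT_ENTRY_SIZE	GENMASK_ULL(7, 4)
#define GITS_TYPER_DEVBITS		GENMASK_ULL(17, 13)

int main(void)
{
	uint64_t typer = 0x26070;	/* made-up GITS_TYPER value for illustration */

	/* The old macros added 1 themselves; with bare masks the caller does it. */
	printf("ITT entry size: %llu bytes\n",
	       (unsigned long long)(FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, typer) + 1));
	printf("DeviceID bits:  %llu\n",
	       (unsigned long long)(FIELD_GET(GITS_TYPER_DEVBITS, typer) + 1));
	return 0;
}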


@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*/
#ifndef __LINUX_IRQCHIP_INGENIC_H__
#define __LINUX_IRQCHIP_INGENIC_H__
#include <linux/irq.h>
extern void ingenic_intc_irq_suspend(struct irq_data *data);
extern void ingenic_intc_irq_resume(struct irq_data *data);
#endif


@@ -83,6 +83,7 @@ enum irq_domain_bus_token {
DOMAIN_BUS_IPI,
DOMAIN_BUS_FSL_MC_MSI,
DOMAIN_BUS_TI_SCI_INTA_MSI,
DOMAIN_BUS_WAKEUP,
};
/**


@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __QCOM_IRQ_H
#define __QCOM_IRQ_H
#include <linux/irqdomain.h>
#define GPIO_NO_WAKE_IRQ ~0U
/**
* QCOM specific IRQ domain flags that distinguishes the handling of wakeup
* capable interrupts by different interrupt controllers.
*
* IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP: Line must be masked at TLMM and the
* interrupt configuration is done at PDC
* IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP: Interrupt configuration is handled at TLMM
*/
#define IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 0)
#define IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 1)
/**
* irq_domain_qcom_handle_wakeup: Return if the domain handles interrupt
* configuration
* @d: irq domain
*
* This QCOM specific irq domain call returns if the interrupt controller
* requires the interrupt be masked at the child interrupt controller.
*/
static inline bool irq_domain_qcom_handle_wakeup(const struct irq_domain *d)
{
return (d->flags & IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP);
}
#endif


@@ -1297,6 +1297,50 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
/**
* irq_chip_set_parent_state - set the state of a parent interrupt.
*
* @data: Pointer to interrupt specific data
* @which: State to be restored (one of IRQCHIP_STATE_*)
* @val: Value corresponding to @which
*
* Conditional success, if the underlying irqchip does not implement it.
*/
int irq_chip_set_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool val)
{
data = data->parent_data;
if (!data || !data->chip->irq_set_irqchip_state)
return 0;
return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
/**
* irq_chip_get_parent_state - get the state of a parent interrupt.
*
* @data: Pointer to interrupt specific data
* @which: one of IRQCHIP_STATE_* the caller wants to know
* @state: a pointer to a boolean where the state is to be stored
*
* Conditional success, if the underlying irqchip does not implement it.
*/
int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state)
{
data = data->parent_data;
if (!data || !data->chip->irq_get_irqchip_state)
return 0;
return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
/**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL)
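
Taken together with irq_chip_enable_parent(), the intended call pattern from a child irqchip matches the msm_gpio_irq_enable() change earlier in this merge: clear any state already latched at the parent, then enable the parent line. A hedged sketch of such a callback (illustrative only, not code from this merge):

#include <linux/irq.h>

/*
 * Illustrative .irq_enable for a hierarchical child irqchip: discard a
 * stale pending state latched at the parent (e.g. the GIC) before the
 * line is enabled there.  Both helpers quietly return 0 when the parent
 * chip does not implement irq_{set,get}_irqchip_state().
 */
static void example_child_irq_enable(struct irq_data *d)
{
	if (d->parent_data) {
		irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
		irq_chip_enable_parent(d);
	}

	/* ... child-level ack/unmask would follow here ... */
}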