Merge branch irq/loongarch into irq/irqchip-next
* irq/loongarch:
  : .
  : Merge the long awaited IRQ support for the LoongArch architecture.
  :
  : From the cover letter:
  :
  : "Currently, LoongArch based processors (e.g. Loongson-3A5000)
  : can only work together with LS7A chipsets. The irq chips in
  : LoongArch computers include CPUINTC (CPU Core Interrupt
  : Controller), LIOINTC (Legacy I/O Interrupt Controller),
  : EIOINTC (Extended I/O Interrupt Controller), PCH-PIC (Main
  : Interrupt Controller in LS7A chipset), PCH-LPC (LPC Interrupt
  : Controller in LS7A chipset) and PCH-MSI (MSI Interrupt Controller)."
  :
  : Note that this comes with non-official, arch private ACPICA
  : definitions until the official ACPICA update is released.
  : .
  irqchip / ACPI: Introduce ACPI_IRQ_MODEL_LPIC for LoongArch
  irqchip: Add LoongArch CPU interrupt controller support
  irqchip: Add Loongson Extended I/O interrupt controller support
  irqchip/loongson-liointc: Add ACPI init support
  irqchip/loongson-pch-msi: Add ACPI init support
  irqchip/loongson-pch-pic: Add ACPI init support
  irqchip: Add Loongson PCH LPC controller support
  LoongArch: Prepare to support multiple pch-pic and pch-msi irqdomain
  LoongArch: Use ACPI_GENERIC_GSI for gsi handling
  genirq/generic_chip: Export irq_unmap_generic_chip
  ACPI: irq: Allow acpi_gsi_to_irq() to have an arch-specific fallback
  APCI: irq: Add support for multiple GSI domains
  LoongArch: Provisionally add ACPICA data structures

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 0fa72ed05e
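For orientation before the diff: the series routes each GSI range to its own irqdomain instead of one global ACPI GSI domain. The sketch below condenses lpic_get_gsi_domain_id() from the new irq-loongarch-cpu.c further down; it is a paraphrase of that driver, not additional code.

	static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)	/* condensed */
	{
		if (gsi >= GSI_MIN_CPU_IRQ && gsi <= GSI_MAX_CPU_IRQ)
			return liointc_handle;				/* LIOINTC */
		if (gsi >= GSI_MIN_LPC_IRQ && gsi <= GSI_MAX_LPC_IRQ)
			return pch_lpc_handle;				/* PCH-LPC */
		if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) {
			int id = find_pch_pic(gsi);			/* PCH-PIC */
			return (id >= 0) ? pch_pic_handle[id] : NULL;
		}
		return NULL;
	}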
@@ -2,6 +2,7 @@
config LOONGARCH
	bool
	default y
	select ACPI_GENERIC_GSI if ACPI
	select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
	select ARCH_BINFMT_ELF_STATE
	select ARCH_ENABLE_MEMORY_HOTPLUG
@@ -31,6 +31,148 @@ static inline bool acpi_has_cpu_in_madt(void)

extern struct list_head acpi_wakeup_device_list;

/*
 * Temporary definitions until the core ACPICA code gets updated (see
 * 1656837932-18257-1-git-send-email-lvjianmin@loongson.cn and its
 * follow-ups for the "rationale").
 *
 * Once the "legal reasons" are cleared and the code is merged, this
 * can be dropped entirely.
 */
#if (ACPI_CA_VERSION == 0x20220331 && !defined(LOONGARCH_ACPICA_EXT))

#define LOONGARCH_ACPICA_EXT 1

#define ACPI_MADT_TYPE_CORE_PIC 17
#define ACPI_MADT_TYPE_LIO_PIC 18
#define ACPI_MADT_TYPE_HT_PIC 19
#define ACPI_MADT_TYPE_EIO_PIC 20
#define ACPI_MADT_TYPE_MSI_PIC 21
#define ACPI_MADT_TYPE_BIO_PIC 22
#define ACPI_MADT_TYPE_LPC_PIC 23

/* Values for Version field above */

enum acpi_madt_core_pic_version {
	ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
	ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
	ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_lio_pic_version {
	ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
	ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
	ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_eio_pic_version {
	ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
	ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
	ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_ht_pic_version {
	ACPI_MADT_HT_PIC_VERSION_NONE = 0,
	ACPI_MADT_HT_PIC_VERSION_V1 = 1,
	ACPI_MADT_HT_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_bio_pic_version {
	ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
	ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
	ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_msi_pic_version {
	ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
	ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
	ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

enum acpi_madt_lpc_pic_version {
	ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
	ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
	ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2	/* 2 and greater are reserved */
};

#pragma pack(1)

/* Core Interrupt Controller */

struct acpi_madt_core_pic {
	struct acpi_subtable_header header;
	u8 version;
	u32 processor_id;
	u32 core_id;
	u32 flags;
};

/* Legacy I/O Interrupt Controller */

struct acpi_madt_lio_pic {
	struct acpi_subtable_header header;
	u8 version;
	u64 address;
	u16 size;
	u8 cascade[2];
	u32 cascade_map[2];
};

/* Extend I/O Interrupt Controller */

struct acpi_madt_eio_pic {
	struct acpi_subtable_header header;
	u8 version;
	u8 cascade;
	u8 node;
	u64 node_map;
};

/* HT Interrupt Controller */

struct acpi_madt_ht_pic {
	struct acpi_subtable_header header;
	u8 version;
	u64 address;
	u16 size;
	u8 cascade[8];
};

/* Bridge I/O Interrupt Controller */

struct acpi_madt_bio_pic {
	struct acpi_subtable_header header;
	u8 version;
	u64 address;
	u16 size;
	u16 id;
	u16 gsi_base;
};

/* MSI Interrupt Controller */

struct acpi_madt_msi_pic {
	struct acpi_subtable_header header;
	u8 version;
	u64 msg_address;
	u32 start;
	u32 count;
};

/* LPC Interrupt Controller */

struct acpi_madt_lpc_pic {
	struct acpi_subtable_header header;
	u8 version;
	u64 address;
	u16 size;
	u8 cascade;
};

#pragma pack()

#endif

#endif /* !CONFIG_ACPI */

#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
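A minimal sketch of consuming one of the provisional subtables above with the existing MADT walker; the handler name and printout are illustrative, not part of the patch.

	static int __init eio_pic_probe(union acpi_subtable_headers *header,
					const unsigned long end)
	{
		struct acpi_madt_eio_pic *eio = (struct acpi_madt_eio_pic *)header;

		pr_info("EIOINTC: node %u, cascade vector %u\n", eio->node, eio->cascade);
		return 0;
	}

	/* e.g. from an arch setup path: */
	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eio_pic_probe, 0);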
@ -35,9 +35,6 @@ static inline bool on_irq_stack(int cpu, unsigned long sp)
|
|||
return (low <= sp && sp <= high);
|
||||
}
|
||||
|
||||
int get_ipi_irq(void);
|
||||
int get_pmc_irq(void);
|
||||
int get_timer_irq(void);
|
||||
void spurious_interrupt(void);
|
||||
|
||||
#define NR_IRQS_LEGACY 16
|
||||
|
@ -48,6 +45,14 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel
|
|||
#define MAX_IO_PICS 2
|
||||
#define NR_IRQS (64 + (256 * MAX_IO_PICS))
|
||||
|
||||
struct acpi_vector_group {
|
||||
int node;
|
||||
int pci_segment;
|
||||
struct irq_domain *parent;
|
||||
};
|
||||
extern struct acpi_vector_group pch_group[MAX_IO_PICS];
|
||||
extern struct acpi_vector_group msi_group[MAX_IO_PICS];
|
||||
|
||||
#define CORES_PER_EIO_NODE 4
|
||||
|
||||
#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
|
||||
|
@ -79,15 +84,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_sel
|
|||
extern int find_pch_pic(u32 gsi);
|
||||
extern int eiointc_get_node(int id);
|
||||
|
||||
static inline void eiointc_enable(void)
|
||||
{
|
||||
uint64_t misc;
|
||||
|
||||
misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
|
||||
misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
|
||||
iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
|
||||
}
|
||||
|
||||
struct acpi_madt_lio_pic;
|
||||
struct acpi_madt_eio_pic;
|
||||
struct acpi_madt_ht_pic;
|
||||
|
@ -95,21 +91,29 @@ struct acpi_madt_bio_pic;
|
|||
struct acpi_madt_msi_pic;
|
||||
struct acpi_madt_lpc_pic;
|
||||
|
||||
struct irq_domain *loongarch_cpu_irq_init(void);
|
||||
|
||||
struct irq_domain *liointc_acpi_init(struct irq_domain *parent,
|
||||
int liointc_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_lio_pic *acpi_liointc);
|
||||
struct irq_domain *eiointc_acpi_init(struct irq_domain *parent,
|
||||
int eiointc_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_eio_pic *acpi_eiointc);
|
||||
|
||||
struct irq_domain *htvec_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_ht_pic *acpi_htvec);
|
||||
struct irq_domain *pch_lpc_acpi_init(struct irq_domain *parent,
|
||||
int pch_lpc_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_lpc_pic *acpi_pchlpc);
|
||||
struct irq_domain *pch_msi_acpi_init(struct irq_domain *parent,
|
||||
#if IS_ENABLED(CONFIG_LOONGSON_PCH_MSI)
|
||||
int pch_msi_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_msi_pic *acpi_pchmsi);
|
||||
struct irq_domain *pch_pic_acpi_init(struct irq_domain *parent,
|
||||
#else
|
||||
static inline int pch_msi_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_msi_pic *acpi_pchmsi)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
int pch_pic_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_bio_pic *acpi_pchpic);
|
||||
int find_pch_pic(u32 gsi);
|
||||
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
|
||||
|
||||
extern struct acpi_madt_lio_pic *acpi_liointc;
|
||||
extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
|
||||
|
@ -119,11 +123,10 @@ extern struct acpi_madt_lpc_pic *acpi_pchlpc;
|
|||
extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
|
||||
extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
|
||||
|
||||
extern struct irq_domain *cpu_domain;
|
||||
extern struct irq_domain *liointc_domain;
|
||||
extern struct irq_domain *pch_lpc_domain;
|
||||
extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
|
||||
extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
|
||||
extern struct fwnode_handle *cpuintc_handle;
|
||||
extern struct fwnode_handle *liointc_handle;
|
||||
extern struct fwnode_handle *pch_lpc_handle;
|
||||
extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
|
||||
|
||||
extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ EXPORT_SYMBOL(acpi_pci_disabled);
|
|||
int acpi_strict = 1; /* We have no workarounds on LoongArch */
|
||||
int num_processors;
|
||||
int disabled_cpus;
|
||||
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
|
||||
|
||||
u64 acpi_saved_sp;
|
||||
|
||||
|
@ -33,70 +32,6 @@ u64 acpi_saved_sp;
|
|||
|
||||
#define PREFIX "ACPI: "
|
||||
|
||||
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
|
||||
{
|
||||
if (irqp != NULL)
|
||||
*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
|
||||
return (*irqp >= 0) ? 0 : -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
|
||||
|
||||
int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
|
||||
{
|
||||
if (gsi)
|
||||
*gsi = isa_irq;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* success: return IRQ number (>=0)
|
||||
* failure: return < 0
|
||||
*/
|
||||
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
|
||||
{
|
||||
struct irq_fwspec fwspec;
|
||||
|
||||
switch (gsi) {
|
||||
case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
|
||||
fwspec.fwnode = liointc_domain->fwnode;
|
||||
fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
|
||||
fwspec.param_count = 1;
|
||||
|
||||
return irq_create_fwspec_mapping(&fwspec);
|
||||
|
||||
case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
|
||||
if (!pch_lpc_domain)
|
||||
return -EINVAL;
|
||||
|
||||
fwspec.fwnode = pch_lpc_domain->fwnode;
|
||||
fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
|
||||
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
|
||||
fwspec.param_count = 2;
|
||||
|
||||
return irq_create_fwspec_mapping(&fwspec);
|
||||
|
||||
case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
|
||||
if (!pch_pic_domain[0])
|
||||
return -EINVAL;
|
||||
|
||||
fwspec.fwnode = pch_pic_domain[0]->fwnode;
|
||||
fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
|
||||
fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
|
||||
fwspec.param_count = 2;
|
||||
|
||||
return irq_create_fwspec_mapping(&fwspec);
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_register_gsi);
|
||||
|
||||
void acpi_unregister_gsi(u32 gsi)
|
||||
{
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
|
||||
|
||||
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
|
||||
{
|
||||
|
||||
|
|
|
@ -25,12 +25,8 @@ DEFINE_PER_CPU(unsigned long, irq_stack);
|
|||
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
|
||||
EXPORT_PER_CPU_SYMBOL(irq_stat);
|
||||
|
||||
struct irq_domain *cpu_domain;
|
||||
struct irq_domain *liointc_domain;
|
||||
struct irq_domain *pch_lpc_domain;
|
||||
struct irq_domain *pch_msi_domain[MAX_IO_PICS];
|
||||
struct irq_domain *pch_pic_domain[MAX_IO_PICS];
|
||||
|
||||
struct acpi_vector_group pch_group[MAX_IO_PICS];
|
||||
struct acpi_vector_group msi_group[MAX_IO_PICS];
|
||||
/*
|
||||
* 'what should we do if we get a hw irq event on an illegal vector'.
|
||||
* each architecture has to answer this themselves.
|
||||
|
@ -56,6 +52,51 @@ int arch_show_interrupts(struct seq_file *p, int prec)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
|
||||
{
|
||||
struct acpi_table_mcfg *mcfg;
|
||||
struct acpi_mcfg_allocation *mptr;
|
||||
int i, n;
|
||||
|
||||
if (header->length < sizeof(struct acpi_table_mcfg))
|
||||
return -EINVAL;
|
||||
|
||||
n = (header->length - sizeof(struct acpi_table_mcfg)) /
|
||||
sizeof(struct acpi_mcfg_allocation);
|
||||
mcfg = (struct acpi_table_mcfg *)header;
|
||||
mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
|
||||
|
||||
for (i = 0; i < n; i++, mptr++) {
|
||||
msi_group[i].pci_segment = mptr->pci_segment;
|
||||
pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init init_vec_parent_group(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_IO_PICS; i++) {
|
||||
msi_group[i].pci_segment = -1;
|
||||
msi_group[i].node = -1;
|
||||
pch_group[i].node = -1;
|
||||
}
|
||||
|
||||
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
|
||||
}
|
||||
|
||||
static int __init get_ipi_irq(void)
|
||||
{
|
||||
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
|
||||
|
||||
if (d)
|
||||
return irq_create_mapping(d, EXCCODE_IPI - EXCCODE_INT_START);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
int i;
|
||||
|
@ -69,9 +110,12 @@ void __init init_IRQ(void)
|
|||
clear_csr_ecfg(ECFG0_IM);
|
||||
clear_csr_estat(ESTATF_IP);
|
||||
|
||||
init_vec_parent_group();
|
||||
irqchip_init();
|
||||
#ifdef CONFIG_SMP
|
||||
ipi_irq = EXCCODE_IPI - EXCCODE_INT_START;
|
||||
ipi_irq = get_ipi_irq();
|
||||
if (ipi_irq < 0)
|
||||
panic("IPI IRQ mapping failed\n");
|
||||
irq_set_percpu_devid(ipi_irq);
|
||||
r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
|
||||
if (r < 0)
|
||||
|
|
|
@ -123,6 +123,16 @@ void sync_counter(void)
|
|||
csr_write64(-init_timeval, LOONGARCH_CSR_CNTC);
|
||||
}
|
||||
|
||||
static int get_timer_irq(void)
|
||||
{
|
||||
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
|
||||
|
||||
if (d)
|
||||
return irq_create_mapping(d, EXCCODE_TIMER - EXCCODE_INT_START);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int constant_clockevent_init(void)
|
||||
{
|
||||
unsigned int irq;
|
||||
|
@ -132,7 +142,9 @@ int constant_clockevent_init(void)
|
|||
struct clock_event_device *cd;
|
||||
static int timer_irq_installed = 0;
|
||||
|
||||
irq = EXCCODE_TIMER - EXCCODE_INT_START;
|
||||
irq = get_timer_irq();
|
||||
if (irq < 0)
|
||||
pr_err("Failed to map irq %d (timer)\n", irq);
|
||||
|
||||
cd = &per_cpu(constant_clockevent_device, cpu);
|
||||
|
||||
|
|
|
@ -7,8 +7,9 @@
|
|||
#define NR_MIPS_CPU_IRQS 8
|
||||
#define NR_MAX_CHAINED_IRQS 40 /* Chained IRQs means those not directly used by devices */
|
||||
#define NR_IRQS (NR_IRQS_LEGACY + NR_MIPS_CPU_IRQS + NR_MAX_CHAINED_IRQS + 256)
|
||||
|
||||
#define MAX_IO_PICS 1
|
||||
#define MIPS_CPU_IRQ_BASE NR_IRQS_LEGACY
|
||||
#define GSI_MIN_CPU_IRQ 0
|
||||
|
||||
#include <asm/mach-generic/irq.h>
|
||||
|
||||
|
|
|
@@ -1145,6 +1145,9 @@ static int __init acpi_bus_init_irq(void)
	case ACPI_IRQ_MODEL_PLATFORM:
		message = "platform specific model";
		break;
	case ACPI_IRQ_MODEL_LPIC:
		message = "LPIC";
		break;
	default:
		pr_info("Unknown interrupt routing model\n");
		return -ENODEV;
@@ -12,7 +12,8 @@

enum acpi_irq_model_id acpi_irq_model;

static struct fwnode_handle *acpi_gsi_domain_id;
static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);

/**
 * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI

@@ -26,14 +27,18 @@ static struct fwnode_handle *acpi_gsi_domain_id;
 */
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
	struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
							DOMAIN_BUS_ANY);
	struct irq_domain *d;

	d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
				     DOMAIN_BUS_ANY);
	*irq = irq_find_mapping(d, gsi);
	/*
	 * *irq == 0 means no mapping, that should
	 * be reported as a failure
	 * *irq == 0 means no mapping, that should be reported as a
	 * failure, unless there is an arch-specific fallback handler.
	 */
	if (!*irq && acpi_gsi_to_irq_fallback)
		*irq = acpi_gsi_to_irq_fallback(gsi);

	return (*irq > 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
@ -53,12 +58,12 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
|
|||
{
|
||||
struct irq_fwspec fwspec;
|
||||
|
||||
if (WARN_ON(!acpi_gsi_domain_id)) {
|
||||
fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
|
||||
if (WARN_ON(!fwspec.fwnode)) {
|
||||
pr_warn("GSI: No registered irqchip, giving up\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fwspec.fwnode = acpi_gsi_domain_id;
|
||||
fwspec.param[0] = gsi;
|
||||
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
|
||||
fwspec.param_count = 2;
|
||||
|
@ -73,13 +78,14 @@ EXPORT_SYMBOL_GPL(acpi_register_gsi);
|
|||
*/
|
||||
void acpi_unregister_gsi(u32 gsi)
|
||||
{
|
||||
struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
|
||||
DOMAIN_BUS_ANY);
|
||||
struct irq_domain *d;
|
||||
int irq;
|
||||
|
||||
if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16))
|
||||
return;
|
||||
|
||||
d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi),
|
||||
DOMAIN_BUS_ANY);
|
||||
irq = irq_find_mapping(d, gsi);
|
||||
irq_dispose_mapping(irq);
|
||||
}
|
||||
|
@ -97,7 +103,8 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
|
|||
* The referenced device fwhandle or NULL on failure
|
||||
*/
|
||||
static struct fwnode_handle *
|
||||
acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
|
||||
acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
|
||||
u32 gsi)
|
||||
{
|
||||
struct fwnode_handle *result;
|
||||
struct acpi_device *device;
|
||||
|
@ -105,7 +112,7 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source)
|
|||
acpi_status status;
|
||||
|
||||
if (!source->string_length)
|
||||
return acpi_gsi_domain_id;
|
||||
return acpi_get_gsi_domain_id(gsi);
|
||||
|
||||
status = acpi_get_handle(NULL, source->string_ptr, &handle);
|
||||
if (WARN_ON(ACPI_FAILURE(status)))
|
||||
|
@ -194,7 +201,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
|
|||
ctx->index -= irq->interrupt_count;
|
||||
return AE_OK;
|
||||
}
|
||||
fwnode = acpi_gsi_domain_id;
|
||||
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
|
||||
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
|
||||
irq->triggering, irq->polarity,
|
||||
irq->shareable, ctx);
|
||||
|
@ -207,7 +214,8 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
|
|||
ctx->index -= eirq->interrupt_count;
|
||||
return AE_OK;
|
||||
}
|
||||
fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source);
|
||||
fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source,
|
||||
eirq->interrupts[ctx->index]);
|
||||
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
|
||||
eirq->triggering, eirq->polarity,
|
||||
eirq->shareable, ctx);
|
||||
|
@@ -291,10 +299,20 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
 * GSI interrupts
 */
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
			       struct fwnode_handle *fwnode)
			       struct fwnode_handle *(*fn)(u32))
{
	acpi_irq_model = model;
	acpi_gsi_domain_id = fwnode;
	acpi_get_gsi_domain_id = fn;
}

/**
 * acpi_set_gsi_to_irq_fallback - Register a fallback callback that
 * translates a GSI to a Linux IRQ using an arch-specific implementation.
 * @fn: arch-specific fallback handler
 */
void __init acpi_set_gsi_to_irq_fallback(u32 (*fn)(u32))
{
	acpi_gsi_to_irq_fallback = fn;
}

/**
@@ -312,8 +330,14 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
				      const struct irq_domain_ops *ops,
				      void *host_data)
{
	struct irq_domain *d = irq_find_matching_fwnode(acpi_gsi_domain_id,
							DOMAIN_BUS_ANY);
	struct irq_domain *d;

	/* This only works for the GIC model... */
	if (acpi_irq_model != ACPI_IRQ_MODEL_GIC)
		return NULL;

	d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(0),
				     DOMAIN_BUS_ANY);

	if (!d)
		return NULL;
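The consumer side is untouched by this rework: a driver that only knows a GSI still calls acpi_gsi_to_irq(), which now picks the domain per GSI and, failing that, tries the arch fallback. A hypothetical caller, for illustration only:

	static int example_claim_gsi(u32 gsi, irq_handler_t handler, void *dev_id)
	{
		unsigned int irq;

		if (acpi_gsi_to_irq(gsi, &irq))
			return -EINVAL;	/* no mapping and no arch fallback */

		return request_irq(irq, handler, IRQF_SHARED, "example-gsi", dev_id);
	}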
@@ -556,6 +556,16 @@ config EXYNOS_IRQ_COMBINER
	  Say yes here to add support for the IRQ combiner devices embedded
	  in Samsung Exynos chips.

config IRQ_LOONGARCH_CPU
	bool
	select GENERIC_IRQ_CHIP
	select IRQ_DOMAIN
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
	help
	  Support for the LoongArch CPU Interrupt Controller. For details of
	  irq chip hierarchy on LoongArch platforms please read the document
	  Documentation/loongarch/irq-chip-model.rst.

config LOONGSON_LIOINTC
	bool "Loongson Local I/O Interrupt Controller"
	depends on MACH_LOONGSON64

@@ -565,6 +575,16 @@ config LOONGSON_LIOINTC
	help
	  Support for the Loongson Local I/O Interrupt Controller.

config LOONGSON_EIOINTC
	bool "Loongson Extend I/O Interrupt Controller"
	depends on LOONGARCH
	depends on MACH_LOONGSON64
	default MACH_LOONGSON64
	select IRQ_DOMAIN_HIERARCHY
	select GENERIC_IRQ_CHIP
	help
	  Support for the Loongson3 Extend I/O Interrupt Vector Controller.

config LOONGSON_HTPIC
	bool "Loongson3 HyperTransport PIC Controller"
	depends on MACH_LOONGSON64 && MIPS

@@ -584,7 +604,7 @@ config LOONGSON_HTVEC

config LOONGSON_PCH_PIC
	bool "Loongson PCH PIC Controller"
	depends on MACH_LOONGSON64 || COMPILE_TEST
	depends on MACH_LOONGSON64
	default MACH_LOONGSON64
	select IRQ_DOMAIN_HIERARCHY
	select IRQ_FASTEOI_HIERARCHY_HANDLERS

@@ -593,7 +613,7 @@ config LOONGSON_PCH_PIC

config LOONGSON_PCH_MSI
	bool "Loongson PCH MSI Controller"
	depends on MACH_LOONGSON64 || COMPILE_TEST
	depends on MACH_LOONGSON64
	depends on PCI
	default MACH_LOONGSON64
	select IRQ_DOMAIN_HIERARCHY

@@ -601,6 +621,14 @@ config LOONGSON_PCH_MSI
	help
	  Support for the Loongson PCH MSI Controller.

config LOONGSON_PCH_LPC
	bool "Loongson PCH LPC Controller"
	depends on MACH_LOONGSON64
	default (MACH_LOONGSON64 && LOONGARCH)
	select IRQ_DOMAIN_HIERARCHY
	help
	  Support for the Loongson PCH LPC Controller.

config MST_IRQ
	bool "MStar Interrupt Controller"
	depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
@@ -104,11 +104,14 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
obj-$(CONFIG_LOONGSON_PCH_LPC) += irq-loongson-pch-lpc.o
obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
@ -2360,11 +2360,17 @@ static void __init gic_acpi_setup_kvm_info(void)
|
|||
vgic_set_kvm_info(&gic_v3_kvm_info);
|
||||
}
|
||||
|
||||
static struct fwnode_handle *gsi_domain_handle;
|
||||
|
||||
static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
|
||||
{
|
||||
return gsi_domain_handle;
|
||||
}
|
||||
|
||||
static int __init
|
||||
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
|
||||
{
|
||||
struct acpi_madt_generic_distributor *dist;
|
||||
struct fwnode_handle *domain_handle;
|
||||
size_t size;
|
||||
int i, err;
|
||||
|
||||
|
@ -2396,18 +2402,18 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
|
|||
if (err)
|
||||
goto out_redist_unmap;
|
||||
|
||||
domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
|
||||
if (!domain_handle) {
|
||||
gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
|
||||
if (!gsi_domain_handle) {
|
||||
err = -ENOMEM;
|
||||
goto out_redist_unmap;
|
||||
}
|
||||
|
||||
err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
|
||||
acpi_data.nr_redist_regions, 0, domain_handle);
|
||||
acpi_data.nr_redist_regions, 0, gsi_domain_handle);
|
||||
if (err)
|
||||
goto out_fwhandle_free;
|
||||
|
||||
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
|
||||
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
|
||||
|
||||
if (static_branch_likely(&supports_deactivate_key))
|
||||
gic_acpi_setup_kvm_info();
|
||||
|
@ -2415,7 +2421,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
|
|||
return 0;
|
||||
|
||||
out_fwhandle_free:
|
||||
irq_domain_free_fwnode(domain_handle);
|
||||
irq_domain_free_fwnode(gsi_domain_handle);
|
||||
out_redist_unmap:
|
||||
for (i = 0; i < acpi_data.nr_redist_regions; i++)
|
||||
if (acpi_data.redist_regs[i].redist_base)
|
||||
|
|
|
@ -1682,11 +1682,17 @@ static void __init gic_acpi_setup_kvm_info(void)
|
|||
vgic_set_kvm_info(&gic_v2_kvm_info);
|
||||
}
|
||||
|
||||
static struct fwnode_handle *gsi_domain_handle;
|
||||
|
||||
static struct fwnode_handle *gic_v2_get_gsi_domain_id(u32 gsi)
|
||||
{
|
||||
return gsi_domain_handle;
|
||||
}
|
||||
|
||||
static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
|
||||
const unsigned long end)
|
||||
{
|
||||
struct acpi_madt_generic_distributor *dist;
|
||||
struct fwnode_handle *domain_handle;
|
||||
struct gic_chip_data *gic = &gic_data[0];
|
||||
int count, ret;
|
||||
|
||||
|
@ -1724,22 +1730,22 @@ static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
|
|||
/*
|
||||
* Initialize GIC instance zero (no multi-GIC support).
|
||||
*/
|
||||
domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
|
||||
if (!domain_handle) {
|
||||
gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
|
||||
if (!gsi_domain_handle) {
|
||||
pr_err("Unable to allocate domain handle\n");
|
||||
gic_teardown(gic);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = __gic_init_bases(gic, domain_handle);
|
||||
ret = __gic_init_bases(gic, gsi_domain_handle);
|
||||
if (ret) {
|
||||
pr_err("Failed to initialise GIC\n");
|
||||
irq_domain_free_fwnode(domain_handle);
|
||||
irq_domain_free_fwnode(gsi_domain_handle);
|
||||
gic_teardown(gic);
|
||||
return ret;
|
||||
}
|
||||
|
||||
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
|
||||
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v2_get_gsi_domain_id);
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
|
||||
gicv2m_init(NULL, gic_data[0].domain);
|
||||
|
|
|
@@ -0,0 +1,148 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/loongarch.h>
#include <asm/setup.h>

static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;

static u32 lpic_gsi_to_irq(u32 gsi)
{
	/* Only pch irqdomain transferring is required for LoongArch. */
	if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
		return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);

	return 0;
}

static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
{
	int id;
	struct fwnode_handle *domain_handle = NULL;

	switch (gsi) {
	case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
		if (liointc_handle)
			domain_handle = liointc_handle;
		break;

	case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
		if (pch_lpc_handle)
			domain_handle = pch_lpc_handle;
		break;

	case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
		id = find_pch_pic(gsi);
		if (id >= 0 && pch_pic_handle[id])
			domain_handle = pch_pic_handle[id];
		break;
	}

	return domain_handle;
}

static void mask_loongarch_irq(struct irq_data *d)
{
	clear_csr_ecfg(ECFGF(d->hwirq));
}

static void unmask_loongarch_irq(struct irq_data *d)
{
	set_csr_ecfg(ECFGF(d->hwirq));
}

static struct irq_chip cpu_irq_controller = {
	.name = "CPUINTC",
	.irq_mask = mask_loongarch_irq,
	.irq_unmask = unmask_loongarch_irq,
};

static void handle_cpu_irq(struct pt_regs *regs)
{
	int hwirq;
	unsigned int estat = read_csr_estat() & CSR_ESTAT_IS;

	while ((hwirq = ffs(estat))) {
		estat &= ~BIT(hwirq - 1);
		generic_handle_domain_irq(irq_domain, hwirq - 1);
	}
}

static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_noprobe(irq);
	irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
	.map = loongarch_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};

static int __init
liointc_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;

	return liointc_acpi_init(irq_domain, liointc_entry);
}

static int __init
eiointc_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;

	return eiointc_acpi_init(irq_domain, eiointc_entry);
}

static int __init acpi_cascade_irqdomain_init(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC,
			      liointc_parse_madt, 0);
	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			      eiointc_parse_madt, 0);
	return 0;
}

static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
				    const unsigned long end)
{
	if (irq_domain)
		return 0;

	/* Mask interrupts. */
	clear_csr_ecfg(ECFG0_IM);
	clear_csr_estat(ESTATF_IP);

	cpuintc_handle = irq_domain_alloc_fwnode(NULL);
	irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
					      &loongarch_cpu_intc_irq_domain_ops, NULL);

	if (!irq_domain)
		panic("Failed to add irqdomain for LoongArch CPU");

	set_handle_irq(&handle_cpu_irq);
	acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
	acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
	acpi_cascade_irqdomain_init();

	return 0;
}

IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
		     NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);
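A worked example of the ffs()-based dispatch in handle_cpu_irq() above, with an illustrative ESTAT value:

	/*
	 * If CSR.ESTAT.IS reads 0x880 (bits 7 and 11 pending), ffs() returns 8
	 * first, so hwirq 7 is dispatched and bit 7 cleared; the next pass
	 * returns 12 and dispatches hwirq 11, after which the loop exits.
	 */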
@ -0,0 +1,395 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Loongson Extend I/O Interrupt Controller support
|
||||
*
|
||||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "eiointc: " fmt
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
|
||||
#define EIOINTC_REG_NODEMAP 0x14a0
|
||||
#define EIOINTC_REG_IPMAP 0x14c0
|
||||
#define EIOINTC_REG_ENABLE 0x1600
|
||||
#define EIOINTC_REG_BOUNCE 0x1680
|
||||
#define EIOINTC_REG_ISR 0x1800
|
||||
#define EIOINTC_REG_ROUTE 0x1c00
|
||||
|
||||
#define VEC_REG_COUNT 4
|
||||
#define VEC_COUNT_PER_REG 64
|
||||
#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG)
|
||||
#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
|
||||
#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
|
||||
#define EIOINTC_ALL_ENABLE 0xffffffff
|
||||
|
||||
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
|
||||
|
||||
static int nr_pics;
|
||||
|
||||
struct eiointc_priv {
|
||||
u32 node;
|
||||
nodemask_t node_map;
|
||||
cpumask_t cpuspan_map;
|
||||
struct fwnode_handle *domain_handle;
|
||||
struct irq_domain *eiointc_domain;
|
||||
};
|
||||
|
||||
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
|
||||
|
||||
static void eiointc_enable(void)
|
||||
{
|
||||
uint64_t misc;
|
||||
|
||||
misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
|
||||
misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
|
||||
iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
|
||||
}
|
||||
|
||||
static int cpu_to_eio_node(int cpu)
|
||||
{
|
||||
return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
|
||||
}
|
||||
|
||||
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
|
||||
{
|
||||
int i, node, cpu_node, route_node;
|
||||
unsigned char coremap;
|
||||
uint32_t pos_off, data, data_byte, data_mask;
|
||||
|
||||
pos_off = pos & ~3;
|
||||
data_byte = pos & 3;
|
||||
data_mask = ~BIT_MASK(data_byte) & 0xf;
|
||||
|
||||
/* Calculate node and coremap of target irq */
|
||||
cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
|
||||
coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
node = cpu_to_eio_node(i);
|
||||
if (!node_isset(node, *node_map))
|
||||
continue;
|
||||
|
||||
/* EIO node 0 is in charge of inter-node interrupt dispatch */
|
||||
route_node = (node == mnode) ? cpu_node : node;
|
||||
data = ((coremap | (route_node << 4)) << (data_byte * 8));
|
||||
csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
|
||||
}
|
||||
}
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(affinity_lock);
|
||||
|
||||
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
|
||||
{
|
||||
unsigned int cpu;
|
||||
unsigned long flags;
|
||||
uint32_t vector, regaddr;
|
||||
struct cpumask intersect_affinity;
|
||||
struct eiointc_priv *priv = d->domain->host_data;
|
||||
|
||||
raw_spin_lock_irqsave(&affinity_lock, flags);
|
||||
|
||||
cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
|
||||
cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
|
||||
|
||||
if (cpumask_empty(&intersect_affinity)) {
|
||||
raw_spin_unlock_irqrestore(&affinity_lock, flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
cpu = cpumask_first(&intersect_affinity);
|
||||
|
||||
vector = d->hwirq;
|
||||
regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
|
||||
|
||||
/* Mask target vector */
|
||||
csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), 0x0, 0);
|
||||
/* Set route for target vector */
|
||||
eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
|
||||
/* Unmask target vector */
|
||||
csr_any_send(regaddr, EIOINTC_ALL_ENABLE, 0x0, 0);
|
||||
|
||||
irq_data_update_effective_affinity(d, cpumask_of(cpu));
|
||||
|
||||
raw_spin_unlock_irqrestore(&affinity_lock, flags);
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
|
||||
static int eiointc_index(int node)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_pics; i++) {
|
||||
if (node_isset(node, eiointc_priv[i]->node_map))
|
||||
return i;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int eiointc_router_init(unsigned int cpu)
|
||||
{
|
||||
int i, bit;
|
||||
uint32_t data;
|
||||
uint32_t node = cpu_to_eio_node(cpu);
|
||||
uint32_t index = eiointc_index(node);
|
||||
|
||||
if (index < 0) {
|
||||
pr_err("Error: invalid nodemap!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
|
||||
eiointc_enable();
|
||||
|
||||
for (i = 0; i < VEC_COUNT / 32; i++) {
|
||||
data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
|
||||
iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
|
||||
}
|
||||
|
||||
for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
|
||||
bit = BIT(1 + index); /* Route to IP[1 + index] */
|
||||
data = bit | (bit << 8) | (bit << 16) | (bit << 24);
|
||||
iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
|
||||
}
|
||||
|
||||
for (i = 0; i < VEC_COUNT / 4; i++) {
|
||||
/* Route to Node-0 Core-0 */
|
||||
if (index == 0)
|
||||
bit = BIT(cpu_logical_map(0));
|
||||
else
|
||||
bit = (eiointc_priv[index]->node << 4) | 1;
|
||||
|
||||
data = bit | (bit << 8) | (bit << 16) | (bit << 24);
|
||||
iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
|
||||
}
|
||||
|
||||
for (i = 0; i < VEC_COUNT / 32; i++) {
|
||||
data = 0xffffffff;
|
||||
iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
|
||||
iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eiointc_irq_dispatch(struct irq_desc *desc)
|
||||
{
|
||||
int i;
|
||||
u64 pending;
|
||||
bool handled = false;
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
for (i = 0; i < VEC_REG_COUNT; i++) {
|
||||
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
|
||||
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
|
||||
while (pending) {
|
||||
int bit = __ffs(pending);
|
||||
int irq = bit + VEC_COUNT_PER_REG * i;
|
||||
|
||||
generic_handle_domain_irq(priv->eiointc_domain, irq);
|
||||
pending &= ~BIT(bit);
|
||||
handled = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!handled)
|
||||
spurious_interrupt();
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void eiointc_ack_irq(struct irq_data *d)
|
||||
{
|
||||
}
|
||||
|
||||
static void eiointc_mask_irq(struct irq_data *d)
|
||||
{
|
||||
}
|
||||
|
||||
static void eiointc_unmask_irq(struct irq_data *d)
|
||||
{
|
||||
}
|
||||
|
||||
static struct irq_chip eiointc_irq_chip = {
|
||||
.name = "EIOINTC",
|
||||
.irq_ack = eiointc_ack_irq,
|
||||
.irq_mask = eiointc_mask_irq,
|
||||
.irq_unmask = eiointc_unmask_irq,
|
||||
.irq_set_affinity = eiointc_set_irq_affinity,
|
||||
};
|
||||
|
||||
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs, void *arg)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i, type;
|
||||
unsigned long hwirq = 0;
|
||||
struct eiointc *priv = domain->host_data;
|
||||
|
||||
ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
|
||||
priv, handle_edge_irq, NULL, NULL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
|
||||
|
||||
irq_set_handler(virq + i, NULL);
|
||||
irq_domain_reset_irq_data(d);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops eiointc_domain_ops = {
|
||||
.translate = irq_domain_translate_onecell,
|
||||
.alloc = eiointc_domain_alloc,
|
||||
.free = eiointc_domain_free,
|
||||
};
|
||||
|
||||
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (cpu_has_flatmode)
|
||||
node = cpu_to_node(node * CORES_PER_EIO_NODE);
|
||||
|
||||
for (i = 0; i < MAX_IO_PICS; i++) {
|
||||
if (node == vec_group[i].node) {
|
||||
vec_group[i].parent = parent;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_IO_PICS; i++) {
|
||||
if (node == vec_group[i].node)
|
||||
return vec_group[i].parent;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int __init
|
||||
pch_pic_parse_madt(union acpi_subtable_headers *header,
|
||||
const unsigned long end)
|
||||
{
|
||||
struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
|
||||
unsigned int node = (pchpic_entry->address >> 44) & 0xf;
|
||||
struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
|
||||
|
||||
if (parent)
|
||||
return pch_pic_acpi_init(parent, pchpic_entry);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __init
|
||||
pch_msi_parse_madt(union acpi_subtable_headers *header,
|
||||
const unsigned long end)
|
||||
{
|
||||
struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
|
||||
struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
|
||||
|
||||
if (parent)
|
||||
return pch_msi_acpi_init(parent, pchmsi_entry);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int __init acpi_cascade_irqdomain_init(void)
|
||||
{
|
||||
acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
|
||||
pch_pic_parse_madt, 0);
|
||||
acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
|
||||
pch_msi_parse_madt, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __init eiointc_acpi_init(struct irq_domain *parent,
|
||||
struct acpi_madt_eio_pic *acpi_eiointc)
|
||||
{
|
||||
int i, parent_irq;
|
||||
unsigned long node_map;
|
||||
struct eiointc_priv *priv;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
|
||||
if (!priv->domain_handle) {
|
||||
pr_err("Unable to allocate domain handle\n");
|
||||
goto out_free_priv;
|
||||
}
|
||||
|
||||
priv->node = acpi_eiointc->node;
|
||||
node_map = acpi_eiointc->node_map ? : -1ULL;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
if (node_map & (1ULL << cpu_to_eio_node(i))) {
|
||||
node_set(cpu_to_eio_node(i), priv->node_map);
|
||||
cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
|
||||
}
|
||||
}
|
||||
|
||||
/* Setup IRQ domain */
|
||||
priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
|
||||
&eiointc_domain_ops, priv);
|
||||
if (!priv->eiointc_domain) {
|
||||
pr_err("loongson-eiointc: cannot add IRQ domain\n");
|
||||
goto out_free_handle;
|
||||
}
|
||||
|
||||
eiointc_priv[nr_pics++] = priv;
|
||||
|
||||
eiointc_router_init(0);
|
||||
|
||||
parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
|
||||
irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
|
||||
|
||||
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
|
||||
"irqchip/loongarch/intc:starting",
|
||||
eiointc_router_init, NULL);
|
||||
|
||||
acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
|
||||
acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
|
||||
acpi_cascade_irqdomain_init();
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_handle:
|
||||
irq_domain_free_fwnode(priv->domain_handle);
|
||||
priv->domain_handle = NULL;
|
||||
out_free_priv:
|
||||
kfree(priv);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
|
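A worked example of the byte-lane arithmetic in eiointc_set_irq_route() in the file above (illustrative numbers):

	/*
	 * pos = 70, cpu_logical_map(cpu) = 5, CORES_PER_EIO_NODE = 4:
	 *   pos_off   = 70 & ~3            = 68   (4-byte aligned route register)
	 *   data_byte = 70 & 3             = 2    (third byte of that register)
	 *   data_mask = ~BIT_MASK(2) & 0xf = 0xb  (write-enable only byte 2)
	 *   cpu_node  = 5 / 4              = 1
	 *   coremap   = BIT(5 % 4)         = 0x02
	 *   data      = (0x02 | (route_node << 4)) << 16
	 * so with route_node = 1, csr_any_send() writes 0x120000, i.e. routes
	 * byte 2 of EIOINTC_REG_ROUTE + 68 to node 1, core 1.
	 */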
@ -23,7 +23,7 @@
|
|||
#endif
|
||||
|
||||
#define LIOINTC_CHIP_IRQ 32
|
||||
#define LIOINTC_NUM_PARENT 4
|
||||
#define LIOINTC_NUM_PARENT 4
|
||||
#define LIOINTC_NUM_CORES 4
|
||||
|
||||
#define LIOINTC_INTC_CHIP_START 0x20
|
||||
|
@ -58,6 +58,8 @@ struct liointc_priv {
|
|||
bool has_lpc_irq_errata;
|
||||
};
|
||||
|
||||
struct fwnode_handle *liointc_handle;
|
||||
|
||||
static void liointc_chained_handle_irq(struct irq_desc *desc)
|
||||
{
|
||||
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
|
||||
|
@ -153,97 +155,79 @@ static void liointc_resume(struct irq_chip_generic *gc)
|
|||
irq_gc_unlock_irqrestore(gc, flags);
|
||||
}
|
||||
|
||||
static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
|
||||
static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
|
||||
static int parent_irq[LIOINTC_NUM_PARENT];
|
||||
static u32 parent_int_map[LIOINTC_NUM_PARENT];
|
||||
static const char *const parent_names[] = {"int0", "int1", "int2", "int3"};
|
||||
static const char *const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"};
|
||||
|
||||
static void __iomem *liointc_get_reg_byname(struct device_node *node,
|
||||
const char *name)
|
||||
static int liointc_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
int index = of_property_match_string(node, "reg-names", name);
|
||||
|
||||
if (index < 0)
|
||||
return NULL;
|
||||
|
||||
return of_iomap(node, index);
|
||||
if (WARN_ON(intsize < 1))
|
||||
return -EINVAL;
|
||||
*out_hwirq = intspec[0] - GSI_MIN_CPU_IRQ;
|
||||
*out_type = IRQ_TYPE_NONE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init liointc_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
static const struct irq_domain_ops acpi_irq_gc_ops = {
|
||||
.map = irq_map_generic_chip,
|
||||
.unmap = irq_unmap_generic_chip,
|
||||
.xlate = liointc_domain_xlate,
|
||||
};
|
||||
|
||||
static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
|
||||
struct fwnode_handle *domain_handle, struct device_node *node)
|
||||
{
|
||||
int i, err;
|
||||
void __iomem *base;
|
||||
struct irq_chip_type *ct;
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_domain *domain;
|
||||
struct irq_chip_type *ct;
|
||||
struct liointc_priv *priv;
|
||||
void __iomem *base;
|
||||
u32 of_parent_int_map[LIOINTC_NUM_PARENT];
|
||||
int parent_irq[LIOINTC_NUM_PARENT];
|
||||
bool have_parent = FALSE;
|
||||
int sz, i, err = 0;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (of_device_is_compatible(node, "loongson,liointc-2.0")) {
|
||||
base = liointc_get_reg_byname(node, "main");
|
||||
if (!base) {
|
||||
err = -ENODEV;
|
||||
goto out_free_priv;
|
||||
}
|
||||
base = ioremap(addr, size);
|
||||
if (!base)
|
||||
goto out_free_priv;
|
||||
|
||||
for (i = 0; i < LIOINTC_NUM_CORES; i++)
|
||||
priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]);
|
||||
if (!priv->core_isr[0]) {
|
||||
err = -ENODEV;
|
||||
goto out_iounmap_base;
|
||||
}
|
||||
} else {
|
||||
base = of_iomap(node, 0);
|
||||
if (!base) {
|
||||
err = -ENODEV;
|
||||
goto out_free_priv;
|
||||
}
|
||||
|
||||
for (i = 0; i < LIOINTC_NUM_CORES; i++)
|
||||
priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
|
||||
}
|
||||
|
||||
for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
|
||||
parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
|
||||
if (parent_irq[i] > 0)
|
||||
have_parent = TRUE;
|
||||
}
|
||||
if (!have_parent) {
|
||||
err = -ENODEV;
|
||||
goto out_iounmap_isr;
|
||||
}
|
||||
|
||||
sz = of_property_read_variable_u32_array(node,
|
||||
"loongson,parent_int_map",
|
||||
&of_parent_int_map[0],
|
||||
LIOINTC_NUM_PARENT,
|
||||
LIOINTC_NUM_PARENT);
|
||||
if (sz < 4) {
|
||||
pr_err("loongson-liointc: No parent_int_map\n");
|
||||
err = -ENODEV;
|
||||
goto out_iounmap_isr;
|
||||
}
|
||||
for (i = 0; i < LIOINTC_NUM_CORES; i++)
|
||||
priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
|
||||
|
||||
for (i = 0; i < LIOINTC_NUM_PARENT; i++)
|
||||
priv->handler[i].parent_int_map = of_parent_int_map[i];
|
||||
priv->handler[i].parent_int_map = parent_int_map[i];
|
||||
|
||||
if (revision > 1) {
|
||||
for (i = 0; i < LIOINTC_NUM_CORES; i++) {
|
||||
int index = of_property_match_string(node,
|
||||
"reg-names", core_reg_names[i]);
|
||||
|
||||
if (index < 0)
|
||||
return -EINVAL;
|
||||
|
||||
priv->core_isr[i] = of_iomap(node, index);
|
||||
}
|
||||
}
|
||||
|
||||
/* Setup IRQ domain */
|
||||
domain = irq_domain_add_linear(node, 32,
|
||||
if (!acpi_disabled)
|
||||
domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
|
||||
&acpi_irq_gc_ops, priv);
|
||||
else
|
||||
domain = irq_domain_create_linear(domain_handle, LIOINTC_CHIP_IRQ,
|
||||
&irq_generic_chip_ops, priv);
|
||||
if (!domain) {
|
||||
pr_err("loongson-liointc: cannot add IRQ domain\n");
|
||||
err = -EINVAL;
|
||||
goto out_iounmap_isr;
|
||||
goto out_iounmap;
|
||||
}
|
||||
|
||||
err = irq_alloc_domain_generic_chips(domain, 32, 1,
|
||||
node->full_name, handle_level_irq,
|
||||
IRQ_NOPROBE, 0, 0);
|
||||
err = irq_alloc_domain_generic_chips(domain, LIOINTC_CHIP_IRQ, 1,
|
||||
(node ? node->full_name : "LIOINTC"),
|
||||
handle_level_irq, 0, IRQ_NOPROBE, 0);
|
||||
if (err) {
|
||||
pr_err("loongson-liointc: unable to register IRQ domain\n");
|
||||
goto out_free_domain;
|
||||
|
@ -299,24 +283,93 @@ static int __init liointc_of_init(struct device_node *node,
|
|||
liointc_chained_handle_irq, &priv->handler[i]);
|
||||
}
|
||||
|
||||
liointc_handle = domain_handle;
|
||||
return 0;
|
||||
|
||||
out_free_domain:
|
||||
irq_domain_remove(domain);
|
||||
out_iounmap_isr:
|
||||
for (i = 0; i < LIOINTC_NUM_CORES; i++) {
|
||||
if (!priv->core_isr[i])
|
||||
continue;
|
||||
iounmap(priv->core_isr[i]);
|
||||
}
|
||||
out_iounmap_base:
|
||||
out_iounmap:
|
||||
iounmap(base);
|
||||
out_free_priv:
|
||||
kfree(priv);
|
||||
|
||||
return err;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
|
||||
static int __init liointc_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
bool have_parent = FALSE;
|
||||
int sz, i, index, revision, err = 0;
|
||||
struct resource res;
|
||||
|
||||
if (!of_device_is_compatible(node, "loongson,liointc-2.0")) {
|
||||
index = 0;
|
||||
revision = 1;
|
||||
} else {
|
||||
index = of_property_match_string(node, "reg-names", "main");
|
||||
revision = 2;
|
||||
}
|
||||
|
||||
if (of_address_to_resource(node, index, &res))
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < LIOINTC_NUM_PARENT; i++) {
|
||||
parent_irq[i] = of_irq_get_byname(node, parent_names[i]);
|
||||
if (parent_irq[i] > 0)
|
||||
have_parent = TRUE;
|
||||
}
|
||||
if (!have_parent)
|
||||
return -ENODEV;
|
||||
|
||||
sz = of_property_read_variable_u32_array(node,
|
||||
"loongson,parent_int_map",
|
||||
&parent_int_map[0],
|
||||
LIOINTC_NUM_PARENT,
|
||||
LIOINTC_NUM_PARENT);
|
||||
if (sz < 4) {
|
||||
pr_err("loongson-liointc: No parent_int_map\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
err = liointc_init(res.start, resource_size(&res),
|
||||
revision, of_node_to_fwnode(node), node);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init);
|
||||
IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init);
|
||||
IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
|
||||
{
|
||||
int ret;
|
||||
struct fwnode_handle *domain_handle;
|
||||
|
||||
parent_int_map[0] = acpi_liointc->cascade_map[0];
|
||||
parent_int_map[1] = acpi_liointc->cascade_map[1];
|
||||
|
||||
parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
|
||||
parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
|
||||
|
||||
domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_liointc);
|
||||
if (!domain_handle) {
|
||||
pr_err("Unable to allocate domain handle\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
|
||||
1, domain_handle, NULL);
|
||||
if (ret)
|
||||
irq_domain_free_fwnode(domain_handle);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
|
|
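A quick worked example for liointc_domain_xlate() in the hunks above, assuming GSI_MIN_CPU_IRQ is 16 as defined in the LoongArch <asm/irq.h> (not shown in this diff):

	/*
	 * acpi_register_gsi() hands this domain a two-cell fwspec {gsi, type};
	 * for GSI 19 the xlate returns hwirq = 19 - GSI_MIN_CPU_IRQ = 3, i.e.
	 * bit 3 of the 32 LIOINTC sources, with the type forced to IRQ_TYPE_NONE.
	 */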
@ -0,0 +1,205 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson LPC Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "lpc: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>

/* Registers */
#define LPC_INT_CTL             0x00
#define LPC_INT_ENA             0x04
#define LPC_INT_STS             0x08
#define LPC_INT_CLR             0x0c
#define LPC_INT_POL             0x10
#define LPC_COUNT               16

/* LPC_INT_CTL */
#define LPC_INT_CTL_EN          BIT(31)

struct pch_lpc {
        void __iomem            *base;
        struct irq_domain       *lpc_domain;
        raw_spinlock_t          lpc_lock;
        u32                     saved_reg_ctl;
        u32                     saved_reg_ena;
        u32                     saved_reg_pol;
};

struct fwnode_handle *pch_lpc_handle;

static void lpc_irq_ack(struct irq_data *d)
{
        unsigned long flags;
        struct pch_lpc *priv = d->domain->host_data;

        raw_spin_lock_irqsave(&priv->lpc_lock, flags);
        writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR);
        raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}

static void lpc_irq_mask(struct irq_data *d)
{
        unsigned long flags;
        struct pch_lpc *priv = d->domain->host_data;

        raw_spin_lock_irqsave(&priv->lpc_lock, flags);
        writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))),
                        priv->base + LPC_INT_ENA);
        raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}

static void lpc_irq_unmask(struct irq_data *d)
{
        unsigned long flags;
        struct pch_lpc *priv = d->domain->host_data;

        raw_spin_lock_irqsave(&priv->lpc_lock, flags);
        writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)),
                        priv->base + LPC_INT_ENA);
        raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}

static int lpc_irq_set_type(struct irq_data *d, unsigned int type)
{
        u32 val;
        u32 mask = 0x1 << (d->hwirq);
        struct pch_lpc *priv = d->domain->host_data;

        if (!(type & IRQ_TYPE_LEVEL_MASK))
                return 0;

        val = readl(priv->base + LPC_INT_POL);

        if (type == IRQ_TYPE_LEVEL_HIGH)
                val |= mask;
        else
                val &= ~mask;

        writel(val, priv->base + LPC_INT_POL);

        return 0;
}

static const struct irq_chip pch_lpc_irq_chip = {
        .name           = "PCH LPC",
        .irq_mask       = lpc_irq_mask,
        .irq_unmask     = lpc_irq_unmask,
        .irq_ack        = lpc_irq_ack,
        .irq_set_type   = lpc_irq_set_type,
        .flags          = IRQCHIP_SKIP_SET_WAKE,
};

static void lpc_irq_dispatch(struct irq_desc *desc)
{
        u32 pending, bit;
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct pch_lpc *priv = irq_desc_get_handler_data(desc);

        chained_irq_enter(chip, desc);

        pending = readl(priv->base + LPC_INT_ENA);
        pending &= readl(priv->base + LPC_INT_STS);
        if (!pending)
                spurious_interrupt();

        while (pending) {
                bit = __ffs(pending);

                generic_handle_domain_irq(priv->lpc_domain, bit);
                pending &= ~BIT(bit);
        }
        chained_irq_exit(chip, desc);
}

static int pch_lpc_map(struct irq_domain *d, unsigned int irq,
                        irq_hw_number_t hw)
{
        irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq);
        return 0;
}

static const struct irq_domain_ops pch_lpc_domain_ops = {
        .map            = pch_lpc_map,
        .translate      = irq_domain_translate_twocell,
};

static void pch_lpc_reset(struct pch_lpc *priv)
{
        /* Enable the LPC interrupt, bit31: en bit30: edge */
        writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL);
        writel(0, priv->base + LPC_INT_ENA);
        /* Clear all 18 interrupt bits */
        writel(GENMASK(17, 0), priv->base + LPC_INT_CLR);
}

static int pch_lpc_disabled(struct pch_lpc *priv)
{
        return (readl(priv->base + LPC_INT_ENA) == 0xffffffff) &&
                        (readl(priv->base + LPC_INT_STS) == 0xffffffff);
}

int __init pch_lpc_acpi_init(struct irq_domain *parent,
                             struct acpi_madt_lpc_pic *acpi_pchlpc)
{
        int parent_irq;
        struct pch_lpc *priv;
        struct irq_fwspec fwspec;
        struct fwnode_handle *irq_handle;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        raw_spin_lock_init(&priv->lpc_lock);

        priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size);
        if (!priv->base)
                goto free_priv;

        if (pch_lpc_disabled(priv)) {
                pr_err("Failed to get LPC status\n");
                goto iounmap_base;
        }

        irq_handle = irq_domain_alloc_named_fwnode("lpcintc");
        if (!irq_handle) {
                pr_err("Unable to allocate domain handle\n");
                goto iounmap_base;
        }

        priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
                                                    &pch_lpc_domain_ops, priv);
        if (!priv->lpc_domain) {
                pr_err("Failed to create IRQ domain\n");
                goto free_irq_handle;
        }
        pch_lpc_reset(priv);

        fwspec.fwnode = parent->fwnode;
        fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ;
        fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
        fwspec.param_count = 2;
        parent_irq = irq_create_fwspec_mapping(&fwspec);
        irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv);

        pch_lpc_handle = irq_handle;
        return 0;

free_irq_handle:
        irq_domain_free_fwnode(irq_handle);
iounmap_base:
        iounmap(priv->base);
free_priv:
        kfree(priv);

        return -ENOMEM;
}

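The LPC driver above does not hand its irq_domain back to the caller; it publishes the "lpcintc" fwnode through the global pch_lpc_handle. The following is a minimal sketch, not part of this merge, of how platform code could resolve one of the 16 legacy LPC lines into a Linux interrupt once pch_lpc_acpi_init() has run; the helper name is hypothetical, but irq_find_matching_fwnode() and irq_create_mapping() are existing core APIs.

#include <linux/irqdomain.h>

extern struct fwnode_handle *pch_lpc_handle;

/* Hypothetical helper: map LPC hwirq 0..15 to a Linux virq. */
static unsigned int example_map_lpc_irq(irq_hw_number_t lpc_hwirq)
{
        struct irq_domain *d;

        /* Look up the linear domain created around the "lpcintc" fwnode. */
        d = irq_find_matching_fwnode(pch_lpc_handle, DOMAIN_BUS_ANY);
        if (!d)
                return 0;

        /* Returns 0 on failure, otherwise a virq backed by pch_lpc_irq_chip. */
        return irq_create_mapping(d, lpc_hwirq);
}
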
@ -15,6 +15,8 @@
#include <linux/pci.h>
#include <linux/slab.h>

static int nr_pics;

struct pch_msi_data {
        struct mutex msi_map_lock;
        phys_addr_t doorbell;
@ -23,6 +25,8 @@ struct pch_msi_data {
        unsigned long *msi_map;
};

static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];

static void pch_msi_mask_msi_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
@ -154,12 +158,12 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
};

static int pch_msi_init_domains(struct pch_msi_data *priv,
                                struct device_node *node,
                                struct irq_domain *parent)
                                struct irq_domain *parent,
                                struct fwnode_handle *domain_handle)
{
        struct irq_domain *middle_domain, *msi_domain;

        middle_domain = irq_domain_create_linear(of_node_to_fwnode(node),
        middle_domain = irq_domain_create_linear(domain_handle,
                                                 priv->num_irqs,
                                                 &pch_msi_middle_domain_ops,
                                                 priv);
@ -171,7 +175,7 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
        middle_domain->parent = parent;
        irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);

        msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
        msi_domain = pci_msi_create_irq_domain(domain_handle,
                                               &pch_msi_domain_info,
                                               middle_domain);
        if (!msi_domain) {
@ -183,19 +187,11 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
        return 0;
}

static int pch_msi_init(struct device_node *node,
                        struct device_node *parent)
static int pch_msi_init(phys_addr_t msg_address, int irq_base, int irq_count,
                        struct irq_domain *parent_domain, struct fwnode_handle *domain_handle)
{
        struct pch_msi_data *priv;
        struct irq_domain *parent_domain;
        struct resource res;
        int ret;

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("Failed to find the parent domain\n");
                return -ENXIO;
        }
        struct pch_msi_data *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
@ -203,48 +199,95 @@ static int pch_msi_init(struct device_node *node,

        mutex_init(&priv->msi_map_lock);

        ret = of_address_to_resource(node, 0, &res);
        if (ret) {
                pr_err("Failed to allocate resource\n");
                goto err_priv;
        }

        priv->doorbell = res.start;

        if (of_property_read_u32(node, "loongson,msi-base-vec",
                                 &priv->irq_first)) {
                pr_err("Unable to parse MSI vec base\n");
                ret = -EINVAL;
                goto err_priv;
        }

        if (of_property_read_u32(node, "loongson,msi-num-vecs",
                                 &priv->num_irqs)) {
                pr_err("Unable to parse MSI vec number\n");
                ret = -EINVAL;
                goto err_priv;
        }
        priv->doorbell = msg_address;
        priv->irq_first = irq_base;
        priv->num_irqs = irq_count;

        priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
        if (!priv->msi_map) {
                ret = -ENOMEM;
        if (!priv->msi_map)
                goto err_priv;
        }

        pr_debug("Registering %d MSIs, starting at %d\n",
                 priv->num_irqs, priv->irq_first);

        ret = pch_msi_init_domains(priv, node, parent_domain);
        ret = pch_msi_init_domains(priv, parent_domain, domain_handle);
        if (ret)
                goto err_map;

        pch_msi_handle[nr_pics++] = domain_handle;
        return 0;

err_map:
        bitmap_free(priv->msi_map);
err_priv:
        kfree(priv);
        return ret;

        return -EINVAL;
}

IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_init);
#ifdef CONFIG_OF
static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
{
        int err;
        int irq_base, irq_count;
        struct resource res;
        struct irq_domain *parent_domain;

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("Failed to find the parent domain\n");
                return -ENXIO;
        }

        if (of_address_to_resource(node, 0, &res)) {
                pr_err("Failed to allocate resource\n");
                return -EINVAL;
        }

        if (of_property_read_u32(node, "loongson,msi-base-vec", &irq_base)) {
                pr_err("Unable to parse MSI vec base\n");
                return -EINVAL;
        }

        if (of_property_read_u32(node, "loongson,msi-num-vecs", &irq_count)) {
                pr_err("Unable to parse MSI vec number\n");
                return -EINVAL;
        }

        err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
        if (err < 0)
                return err;

        return 0;
}

IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
#endif

#ifdef CONFIG_ACPI
struct fwnode_handle *get_pch_msi_handle(int pci_segment)
{
        int i;

        for (i = 0; i < MAX_IO_PICS; i++) {
                if (msi_group[i].pci_segment == pci_segment)
                        return pch_msi_handle[i];
        }
        return NULL;
}

int __init pch_msi_acpi_init(struct irq_domain *parent,
                             struct acpi_madt_msi_pic *acpi_pchmsi)
{
        int ret;
        struct fwnode_handle *domain_handle;

        domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchmsi);
        ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
                           acpi_pchmsi->count, parent, domain_handle);
        if (ret < 0)
                irq_domain_free_fwnode(domain_handle);

        return ret;
}
#endif

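get_pch_msi_handle() hands out the fwnode of the MSI controller serving a given PCI segment; turning that into the domain a host bridge should use is left to the architecture. A hedged sketch of one plausible consumer follows (not taken from this series; the function name is hypothetical), using only irq_find_matching_fwnode() and dev_set_msi_domain(), which exist in the core kernel.

#include <linux/irqdomain.h>
#include <linux/pci.h>

extern struct fwnode_handle *get_pch_msi_handle(int pci_segment);

/* Hypothetical host-bridge hook: attach the per-segment PCI/MSI domain. */
static void example_attach_msi_domain(struct pci_host_bridge *bridge, int segment)
{
        struct fwnode_handle *fwnode = get_pch_msi_handle(segment);
        struct irq_domain *msi_domain;

        if (!fwnode)
                return;

        /* The MSI domain was registered via pci_msi_create_irq_domain(). */
        msi_domain = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
        if (msi_domain)
                dev_set_msi_domain(&bridge->dev, msi_domain);
}
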
@ -33,13 +33,40 @@
#define PIC_REG_IDX(irq_id)     ((irq_id) / PIC_COUNT_PER_REG)
#define PIC_REG_BIT(irq_id)     ((irq_id) % PIC_COUNT_PER_REG)

static int nr_pics;

struct pch_pic {
        void __iomem *base;
        struct irq_domain *pic_domain;
        u32 ht_vec_base;
        raw_spinlock_t pic_lock;
        u32 vec_count;
        u32 gsi_base;
};

static struct pch_pic *pch_pic_priv[MAX_IO_PICS];

struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];

int find_pch_pic(u32 gsi)
{
        int i;

        /* Find the PCH_PIC that manages this GSI. */
        for (i = 0; i < MAX_IO_PICS; i++) {
                struct pch_pic *priv = pch_pic_priv[i];

                if (!priv)
                        return -1;

                if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count))
                        return i;
        }

        pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi);
        return -1;
}

static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit)
{
        u32 reg;
@ -139,6 +166,28 @@ static struct irq_chip pch_pic_irq_chip = {
        .irq_set_type   = pch_pic_set_type,
};

static int pch_pic_domain_translate(struct irq_domain *d,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        struct pch_pic *priv = d->host_data;
        struct device_node *of_node = to_of_node(fwspec->fwnode);

        if (fwspec->param_count < 1)
                return -EINVAL;

        if (of_node) {
                *hwirq = fwspec->param[0] + priv->ht_vec_base;
                *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
        } else {
                *hwirq = fwspec->param[0] - priv->gsi_base;
                *type = IRQ_TYPE_NONE;
        }

        return 0;
}

static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
                         unsigned int nr_irqs, void *arg)
{
@ -149,13 +198,13 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
        struct irq_fwspec parent_fwspec;
        struct pch_pic *priv = domain->host_data;

        err = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
        err = pch_pic_domain_translate(domain, fwspec, &hwirq, &type);
        if (err)
                return err;

        parent_fwspec.fwnode = domain->parent->fwnode;
        parent_fwspec.param_count = 1;
        parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
        parent_fwspec.param[0] = hwirq;

        err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
        if (err)
@ -170,7 +219,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
}

static const struct irq_domain_ops pch_pic_domain_ops = {
        .translate      = irq_domain_translate_twocell,
        .translate      = pch_pic_domain_translate,
        .alloc          = pch_pic_alloc,
        .free           = irq_domain_free_irqs_parent,
};
@ -180,7 +229,7 @@ static void pch_pic_reset(struct pch_pic *priv)
        int i;

        for (i = 0; i < PIC_COUNT; i++) {
                /* Write vectored ID */
                /* Write vector ID */
                writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i));
                /* Hardcode route to HT0 Lo */
                writeb(1, priv->base + PCH_INT_ROUTE(i));
@ -198,50 +247,37 @@ static void pch_pic_reset(struct pch_pic *priv)
        }
}

static int pch_pic_of_init(struct device_node *node,
                           struct device_node *parent)
static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
                        struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
                        u32 gsi_base)
{
        struct pch_pic *priv;
        struct irq_domain *parent_domain;
        int err;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        raw_spin_lock_init(&priv->pic_lock);
        priv->base = of_iomap(node, 0);
        if (!priv->base) {
                err = -ENOMEM;
        priv->base = ioremap(addr, size);
        if (!priv->base)
                goto free_priv;
        }

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("Failed to find the parent domain\n");
                err = -ENXIO;
                goto iounmap_base;
        }

        if (of_property_read_u32(node, "loongson,pic-base-vec",
                                 &priv->ht_vec_base)) {
                pr_err("Failed to determine pic-base-vec\n");
                err = -EINVAL;
                goto iounmap_base;
        }
        priv->ht_vec_base = vec_base;
        priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
        priv->gsi_base = gsi_base;

        priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
                                                       PIC_COUNT,
                                                       of_node_to_fwnode(node),
                                                       &pch_pic_domain_ops,
                                                       priv);
                                                       priv->vec_count, domain_handle,
                                                       &pch_pic_domain_ops, priv);

        if (!priv->pic_domain) {
                pr_err("Failed to create IRQ domain\n");
                err = -ENOMEM;
                goto iounmap_base;
        }

        pch_pic_reset(priv);
        pch_pic_handle[nr_pics] = domain_handle;
        pch_pic_priv[nr_pics++] = priv;

        return 0;

@ -250,7 +286,86 @@ iounmap_base:
free_priv:
        kfree(priv);

        return err;
        return -EINVAL;
}

#ifdef CONFIG_OF

static int pch_pic_of_init(struct device_node *node,
                           struct device_node *parent)
{
        int err, vec_base;
        struct resource res;
        struct irq_domain *parent_domain;

        if (of_address_to_resource(node, 0, &res))
                return -EINVAL;

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("Failed to find the parent domain\n");
                return -ENXIO;
        }

        if (of_property_read_u32(node, "loongson,pic-base-vec", &vec_base)) {
                pr_err("Failed to determine pic-base-vec\n");
                return -EINVAL;
        }

        err = pch_pic_init(res.start, resource_size(&res), vec_base,
                           parent_domain, of_node_to_fwnode(node), 0);
        if (err < 0)
                return err;

        return 0;
}

IRQCHIP_DECLARE(pch_pic, "loongson,pch-pic-1.0", pch_pic_of_init);

#endif

#ifdef CONFIG_ACPI
static int __init
pch_lpc_parse_madt(union acpi_subtable_headers *header,
                   const unsigned long end)
{
        struct acpi_madt_lpc_pic *pchlpc_entry = (struct acpi_madt_lpc_pic *)header;

        return pch_lpc_acpi_init(pch_pic_priv[0]->pic_domain, pchlpc_entry);
}

static int __init acpi_cascade_irqdomain_init(void)
{
        acpi_table_parse_madt(ACPI_MADT_TYPE_LPC_PIC,
                              pch_lpc_parse_madt, 0);
        return 0;
}

int __init pch_pic_acpi_init(struct irq_domain *parent,
                             struct acpi_madt_bio_pic *acpi_pchpic)
{
        int ret, vec_base;
        struct fwnode_handle *domain_handle;

        vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;

        domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_pchpic);
        if (!domain_handle) {
                pr_err("Unable to allocate domain handle\n");
                return -ENOMEM;
        }

        ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
                           vec_base, parent, domain_handle, acpi_pchpic->gsi_base);

        if (ret < 0) {
                irq_domain_free_fwnode(domain_handle);
                return ret;
        }

        if (acpi_pchpic->id == 0)
                acpi_cascade_irqdomain_init();

        return ret;
}
#endif

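With find_pch_pic() and pch_pic_handle[] now exported, a GSI can be traced back to the fwnode of the PCH-PIC instance that owns it, which is the shape of callback the reworked acpi_set_irq_model() expects. A minimal sketch, assuming a hypothetical arch-side helper name (not code from this merge):

#include <linux/irqdomain.h>

extern int find_pch_pic(u32 gsi);
extern struct fwnode_handle *pch_pic_handle[];

/* Hypothetical arch callback: which irqdomain fwnode owns this GSI? */
static struct fwnode_handle *example_gsi_domain_id(u32 gsi)
{
        int index = find_pch_pic(gsi);  /* -1 if no PCH-PIC covers the GSI */

        if (index < 0)
                return NULL;

        return pch_pic_handle[index];
}
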
@ -105,6 +105,7 @@ enum acpi_irq_model_id {
        ACPI_IRQ_MODEL_IOSAPIC,
        ACPI_IRQ_MODEL_PLATFORM,
        ACPI_IRQ_MODEL_GIC,
        ACPI_IRQ_MODEL_LPIC,
        ACPI_IRQ_MODEL_COUNT
};

@ -356,7 +357,8 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);

void acpi_set_irq_model(enum acpi_irq_model_id model,
                        struct fwnode_handle *fwnode);
                        struct fwnode_handle *(*)(u32));
void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));

struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
                                             unsigned int size,

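acpi_set_irq_model() now takes a per-GSI fwnode lookup callback rather than a single fwnode, and acpi_set_gsi_to_irq_fallback() lets an architecture catch GSIs that no registered domain resolved. A hedged sketch of how an architecture might wire both up at IRQ init time; the callback bodies and names here are placeholders, not code from this merge:

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irqdomain.h>

static struct fwnode_handle *example_gsi_domain_id(u32 gsi)
{
        /* Return the fwnode of the irqchip covering this GSI, or NULL. */
        return NULL;
}

static u32 example_gsi_to_irq_fallback(u32 gsi)
{
        /* Last-resort GSI -> Linux irq translation for acpi_gsi_to_irq(). */
        return gsi;
}

static void __init example_init_irq_model(void)
{
        acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, example_gsi_domain_id);
        acpi_set_gsi_to_irq_fallback(example_gsi_to_irq_fallback);
}
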
@ -151,6 +151,7 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_IRQ_MIPS_GIC_STARTING,
        CPUHP_AP_IRQ_RISCV_STARTING,
        CPUHP_AP_IRQ_LOONGARCH_STARTING,
        CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
        CPUHP_AP_MICROCODE_LOADER,

@ -1137,6 +1137,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw_irq);
void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler);

@ -431,7 +431,7 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
        return 0;
}

static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
        struct irq_data *data = irq_domain_get_irq_data(d, virq);
        struct irq_domain_chip_generic *dgc = d->gc;
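Dropping the static qualifier above makes irq_unmap_generic_chip() available to irqchip drivers, so a driver that mapped interrupts on a generic-chip backed domain can unwind those mappings itself before tearing the domain down. A minimal sketch under that assumption (helper name hypothetical):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical error-path helper for a driver using a generic-chip domain. */
static void example_unwind_generic_chip_domain(struct irq_domain *domain,
                                               unsigned int first_virq,
                                               unsigned int count)
{
        unsigned int i;

        /* Undo irq_map_generic_chip() for each previously mapped virq. */
        for (i = 0; i < count; i++)
                irq_unmap_generic_chip(domain, first_virq + i);

        irq_domain_remove(domain);
}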