Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "The irq department provides:

   - a major update to the auto affinity management code, which is used
     by multi-queue devices

   - move of the microblaze irq chip driver into the common driver code
     so it can be shared between microblaze, powerpc and MIPS

   - a series of updates to the ARM GICV3 interrupt controller

   - the usual pile of fixes and small improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  powerpc/virtex: Use generic xilinx irqchip driver
  irqchip/xilinx: Try to fall back if xlnx,kind-of-intr not provided
  irqchip/xilinx: Add support for parent intc
  irqchip/xilinx: Rename get_irq to xintc_get_irq
  irqchip/xilinx: Restructure and use jump label api
  irqchip/xilinx: Clean up print messages
  microblaze/irqchip: Move intc driver to irqchip
  ARM: virt: Select ARM_GIC_V3_ITS
  ARM: gic-v3-its: Add 32bit support to GICv3 ITS
  irqchip/gic-v3-its: Specialise readq and writeq accesses
  irqchip/gic-v3-its: Specialise flush_dcache operation
  irqchip/gic-v3-its: Narrow down Entry Size when used as a divider
  irqchip/gic-v3-its: Change unsigned types for AArch32 compatibility
  irqchip/gic-v3: Use nops macro for Cavium ThunderX erratum 23154
  irqchip/gic-v3: Convert arm64 GIC accessors to {read,write}_sysreg_s
  genirq/msi: Drop artificial PCI dependency
  irqchip/bcm7038-l1: Implement irq_cpu_offline() callback
  genirq/affinity: Use default affinity mask for reserved vectors
  genirq/affinity: Take reserved vectors into account when spreading irqs
  PCI: Remove the irq_affinity mask from struct pci_dev
  ...
commit f082f02c47
@@ -703,6 +703,7 @@ config ARCH_VIRT
        select ARM_GIC
        select ARM_GIC_V2M if PCI
        select ARM_GIC_V3
+       select ARM_GIC_V3_ITS if PCI
        select ARM_PSCI
        select HAVE_ARM_ARCH_TIMER
@@ -22,6 +22,7 @@

 #include <linux/io.h>
 #include <asm/barrier.h>
+#include <asm/cacheflush.h>
 #include <asm/cp15.h>

 #define ICC_EOIR1                      __ACCESS_CP15(c12, 0, c12, 1)
@@ -230,19 +231,14 @@ static inline void gic_write_bpr1(u32 val)
  * AArch32, since the syndrome register doesn't provide any information for
  * them.
  * Consequently, the following IO helpers use 32bit accesses.
- *
- * There are only two registers that need 64bit accesses in this driver:
- * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
- *   The upper-word (aff3) will always be 0, so there is no need for a lock.
- * - GICR_TYPER is an ID register and doesn't need atomicity.
  */
-static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
 {
        writel_relaxed((u32)val, addr);
        writel_relaxed((u32)(val >> 32), addr + 4);
 }

-static inline u64 gic_read_typer(const volatile void __iomem *addr)
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 {
        u64 val;

@@ -251,5 +247,49 @@ static inline u64 gic_read_typer(const volatile void __iomem *addr)
        return val;
 }

+#define gic_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn, contain the affinity values associated to each interrupt.
+ * The upper-word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c)                __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c)              __gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c)             __gic_readq_nonatomic(c)
+#define gits_write_baser(v, c)         __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASE are changed with LPIs disabled, so they
+ * won't be being used during any updates and can be changed non-atomically
+ */
+#define gicr_read_propbaser(c)         __gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c)     __gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c)         __gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c)     __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c)             __gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c)            __gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c)                __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c)       __gic_writeq_nonatomic(v, c)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
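A note on the pattern above: the 32-bit fallback deliberately splits each 64-bit register access into two relaxed 32-bit accesses. A minimal userspace sketch of the same idea (hypothetical names; little-endian layout assumed) shows why this is safe for registers like GICD_IROUTERn, whose upper word is always zero:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a memory-mapped 64-bit register, modelled as two
 * adjacent 32-bit words (little-endian, lower word first). */
static volatile uint32_t reg[2];

/* Mirrors __gic_writeq_nonatomic(): two independent 32-bit stores.
 * A concurrent reader may observe the new low word with the old high
 * word; that is harmless here because the high word never changes. */
static void writeq_nonatomic(uint64_t val)
{
        reg[0] = (uint32_t)val;
        reg[1] = (uint32_t)(val >> 32);
}

static uint64_t readq_nonatomic(void)
{
        return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
}

int main(void)
{
        writeq_nonatomic(0x00000000deadbeefULL); /* high word (aff3) == 0 */
        printf("0x%llx\n", (unsigned long long)readq_nonatomic());
        return 0;
}
```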
@@ -79,19 +79,10 @@

 #include <linux/stringify.h>
 #include <asm/barrier.h>
+#include <asm/cacheflush.h>

-#define read_gicreg(r)                                                 \
-       ({                                                              \
-               u64 reg;                                                \
-               asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
-               reg;                                                    \
-       })
-
-#define write_gicreg(v,r)                                              \
-       do {                                                            \
-               u64 __val = (v);                                        \
-               asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
-       } while (0)
+#define read_gicreg                    read_sysreg_s
+#define write_gicreg                   write_sysreg_s

 /*
  * Low-level accessors
@@ -102,13 +93,13 @@

 static inline void gic_write_eoir(u32 irq)
 {
-       asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
+       write_sysreg_s(irq, ICC_EOIR1_EL1);
        isb();
 }

 static inline void gic_write_dir(u32 irq)
 {
-       asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
+       write_sysreg_s(irq, ICC_DIR_EL1);
        isb();
 }

@@ -116,7 +107,7 @@ static inline u64 gic_read_iar_common(void)
 {
        u64 irqstat;

-       asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+       irqstat = read_sysreg_s(ICC_IAR1_EL1);
        dsb(sy);
        return irqstat;
 }
@@ -132,12 +123,9 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
 {
        u64 irqstat;

-       asm volatile(
-               "nop;nop;nop;nop\n\t"
-               "nop;nop;nop;nop\n\t"
-               "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
-               "nop;nop;nop;nop"
-               : "=r" (irqstat));
+       nops(8);
+       irqstat = read_sysreg_s(ICC_IAR1_EL1);
+       nops(4);
        mb();

        return irqstat;
@@ -145,37 +133,34 @@ static inline u64 gic_read_iar_cavium_thunderx(void)

 static inline void gic_write_pmr(u32 val)
 {
-       asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+       write_sysreg_s(val, ICC_PMR_EL1);
 }

 static inline void gic_write_ctlr(u32 val)
 {
-       asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
+       write_sysreg_s(val, ICC_CTLR_EL1);
        isb();
 }

 static inline void gic_write_grpen1(u32 val)
 {
-       asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
+       write_sysreg_s(val, ICC_GRPEN1_EL1);
        isb();
 }

 static inline void gic_write_sgi1r(u64 val)
 {
-       asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+       write_sysreg_s(val, ICC_SGI1R_EL1);
 }

 static inline u32 gic_read_sre(void)
 {
-       u64 val;
-
-       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
-       return val;
+       return read_sysreg_s(ICC_SRE_EL1);
 }

 static inline void gic_write_sre(u32 val)
 {
-       asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
+       write_sysreg_s(val, ICC_SRE_EL1);
        isb();
 }

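The conversions in these hunks are mechanical: read_sysreg_s()/write_sysreg_s() wrap the same mrs_s/msr_s instruction sequences the driver previously open-coded. A simplified sketch of their shape (an approximation of the arm64 sysreg helpers of this era, not the exact kernel definitions):

```c
/* Simplified sketch only; the real macros in arch/arm64/include/asm/sysreg.h
 * handle operand constraints and system-register encodings more carefully. */
#define read_sysreg_s(r) ({                                            \
        u64 __val;                                                     \
        asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val));      \
        __val;                                                         \
})

#define write_sysreg_s(v, r) do {                                      \
        u64 __val = (u64)(v);                                          \
        asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));  \
} while (0)
```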
@@ -187,5 +172,21 @@ static inline void gic_write_bpr1(u32 val)
 #define gic_read_typer(c)              readq_relaxed(c)
 #define gic_write_irouter(v, c)                writeq_relaxed(v, c)

+#define gic_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))
+
+#define gits_read_baser(c)             readq_relaxed(c)
+#define gits_write_baser(v, c)         writeq_relaxed(v, c)
+
+#define gits_read_cbaser(c)            readq_relaxed(c)
+#define gits_write_cbaser(v, c)                writeq_relaxed(v, c)
+
+#define gits_write_cwriter(v, c)       writeq_relaxed(v, c)
+
+#define gicr_read_propbaser(c)         readq_relaxed(c)
+#define gicr_write_propbaser(v, c)     writeq_relaxed(v, c)
+
+#define gicr_write_pendbaser(v, c)     writeq_relaxed(v, c)
+#define gicr_read_pendbaser(c)         readq_relaxed(c)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
@@ -27,6 +27,7 @@ config MICROBLAZE
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_OPROFILE
        select IRQ_DOMAIN
+       select XILINX_INTC
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
@@ -16,6 +16,6 @@ struct pt_regs;
 extern void do_IRQ(struct pt_regs *regs);

 /* should be defined in each interrupt controller driver */
-extern unsigned int get_irq(void);
+extern unsigned int xintc_get_irq(void);

 #endif /* _ASM_MICROBLAZE_IRQ_H */
@@ -15,7 +15,7 @@ endif
 extra-y := head.o vmlinux.lds

 obj-y += dma.o exceptions.o \
-       hw_exception_handler.o intc.o irq.o \
+       hw_exception_handler.o irq.o \
        platform.o process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o

@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2012-2013 Xilinx, Inc.
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-
-static void __iomem *intc_baseaddr;
-
-/* No one else should require these constants, so define them locally here. */
-#define ISR 0x00                       /* Interrupt Status Register */
-#define IPR 0x04                       /* Interrupt Pending Register */
-#define IER 0x08                       /* Interrupt Enable Register */
-#define IAR 0x0c                       /* Interrupt Acknowledge Register */
-#define SIE 0x10                       /* Set Interrupt Enable bits */
-#define CIE 0x14                       /* Clear Interrupt Enable bits */
-#define IVR 0x18                       /* Interrupt Vector Register */
-#define MER 0x1c                       /* Master Enable Register */
-
-#define MER_ME (1<<0)
-#define MER_HIE (1<<1)
-
-static unsigned int (*read_fn)(void __iomem *);
-static void (*write_fn)(u32, void __iomem *);
-
-static void intc_write32(u32 val, void __iomem *addr)
-{
-       iowrite32(val, addr);
-}
-
-static unsigned int intc_read32(void __iomem *addr)
-{
-       return ioread32(addr);
-}
-
-static void intc_write32_be(u32 val, void __iomem *addr)
-{
-       iowrite32be(val, addr);
-}
-
-static unsigned int intc_read32_be(void __iomem *addr)
-{
-       return ioread32be(addr);
-}
-
-static void intc_enable_or_unmask(struct irq_data *d)
-{
-       unsigned long mask = 1 << d->hwirq;
-
-       pr_debug("enable_or_unmask: %ld\n", d->hwirq);
-
-       /* ack level irqs because they can't be acked during
-        * ack function since the handle_level_irq function
-        * acks the irq before calling the interrupt handler
-        */
-       if (irqd_is_level_type(d))
-               write_fn(mask, intc_baseaddr + IAR);
-
-       write_fn(mask, intc_baseaddr + SIE);
-}
-
-static void intc_disable_or_mask(struct irq_data *d)
-{
-       pr_debug("disable: %ld\n", d->hwirq);
-       write_fn(1 << d->hwirq, intc_baseaddr + CIE);
-}
-
-static void intc_ack(struct irq_data *d)
-{
-       pr_debug("ack: %ld\n", d->hwirq);
-       write_fn(1 << d->hwirq, intc_baseaddr + IAR);
-}
-
-static void intc_mask_ack(struct irq_data *d)
-{
-       unsigned long mask = 1 << d->hwirq;
-
-       pr_debug("disable_and_ack: %ld\n", d->hwirq);
-       write_fn(mask, intc_baseaddr + CIE);
-       write_fn(mask, intc_baseaddr + IAR);
-}
-
-static struct irq_chip intc_dev = {
-       .name = "Xilinx INTC",
-       .irq_unmask = intc_enable_or_unmask,
-       .irq_mask = intc_disable_or_mask,
-       .irq_ack = intc_ack,
-       .irq_mask_ack = intc_mask_ack,
-};
-
-static struct irq_domain *root_domain;
-
-unsigned int get_irq(void)
-{
-       unsigned int hwirq, irq = -1;
-
-       hwirq = read_fn(intc_baseaddr + IVR);
-       if (hwirq != -1U)
-               irq = irq_find_mapping(root_domain, hwirq);
-
-       pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
-
-       return irq;
-}
-
-static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
-{
-       u32 intr_mask = (u32)d->host_data;
-
-       if (intr_mask & (1 << hw)) {
-               irq_set_chip_and_handler_name(irq, &intc_dev,
-                                               handle_edge_irq, "edge");
-               irq_clear_status_flags(irq, IRQ_LEVEL);
-       } else {
-               irq_set_chip_and_handler_name(irq, &intc_dev,
-                                               handle_level_irq, "level");
-               irq_set_status_flags(irq, IRQ_LEVEL);
-       }
-       return 0;
-}
-
-static const struct irq_domain_ops xintc_irq_domain_ops = {
-       .xlate = irq_domain_xlate_onetwocell,
-       .map = xintc_map,
-};
-
-static int __init xilinx_intc_of_init(struct device_node *intc,
-                                            struct device_node *parent)
-{
-       u32 nr_irq, intr_mask;
-       int ret;
-
-       intc_baseaddr = of_iomap(intc, 0);
-       BUG_ON(!intc_baseaddr);
-
-       ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
-       if (ret < 0) {
-               pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
-               return ret;
-       }
-
-       ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
-       if (ret < 0) {
-               pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
-               return ret;
-       }
-
-       if (intr_mask >> nr_irq)
-               pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
-
-       pr_info("%s: num_irq=%d, edge=0x%x\n",
-               intc->full_name, nr_irq, intr_mask);
-
-       write_fn = intc_write32;
-       read_fn = intc_read32;
-
-       /*
-        * Disable all external interrupts until they are
-        * explicity requested.
-        */
-       write_fn(0, intc_baseaddr + IER);
-
-       /* Acknowledge any pending interrupts just in case. */
-       write_fn(0xffffffff, intc_baseaddr + IAR);
-
-       /* Turn on the Master Enable. */
-       write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
-       if (!(read_fn(intc_baseaddr + MER) & (MER_HIE | MER_ME))) {
-               write_fn = intc_write32_be;
-               read_fn = intc_read32_be;
-               write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
-       }
-
-       /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
-        * lazy and Michal can clean it up to something nicer when he tests
-        * and commits this patch. ~~gcl */
-       root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
-                                                       (void *)intr_mask);
-
-       irq_set_default_host(root_domain);
-
-       return 0;
-}
-
-IRQCHIP_DECLARE(xilinx_intc, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
@@ -29,12 +29,12 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        trace_hardirqs_off();

        irq_enter();
-       irq = get_irq();
+       irq = xintc_get_irq();
 next_irq:
        BUG_ON(!irq);
        generic_handle_irq(irq);

-       irq = get_irq();
+       irq = xintc_get_irq();
        if (irq != -1U) {
                pr_debug("next irq: %d\n", irq);
                ++concurrent_irq;
@@ -14,7 +14,7 @@
 #ifdef __KERNEL__

 extern void __init xilinx_intc_init_tree(void);
-extern unsigned int xilinx_intc_get_irq(void);
+extern unsigned int xintc_get_irq(void);

 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_XILINX_INTC_H */
@@ -64,6 +64,7 @@ config XILINX_VIRTEX_GENERIC_BOARD
        default n
        select XILINX_VIRTEX_II_PRO
        select XILINX_VIRTEX_4_FX
+       select XILINX_INTC
        help
          This option enables generic support for Xilinx Virtex based boards.

@@ -48,7 +48,7 @@ define_machine(virtex) {
        .probe                  = virtex_probe,
        .setup_arch             = xilinx_pci_init,
        .init_IRQ               = xilinx_intc_init_tree,
-       .get_irq                = xilinx_intc_get_irq,
+       .get_irq                = xintc_get_irq,
        .restart                = ppc4xx_reset_system,
        .calibrate_decr         = generic_calibrate_decr,
 };
@@ -241,6 +241,7 @@ config XILINX_VIRTEX440_GENERIC_BOARD
        depends on 44x
        default n
        select XILINX_VIRTEX_5_FXT
+       select XILINX_INTC
        help
          This option enables generic support for Xilinx Virtex based boards
          that use a 440 based processor in the Virtex 5 FXT FPGA architecture.
@@ -54,7 +54,7 @@ define_machine(virtex) {
        .probe                  = virtex_probe,
        .setup_arch             = xilinx_pci_init,
        .init_IRQ               = xilinx_intc_init_tree,
-       .get_irq                = xilinx_intc_get_irq,
+       .get_irq                = xintc_get_irq,
        .calibrate_decr         = generic_calibrate_decr,
        .restart                = ppc4xx_reset_system,
 };
@@ -29,194 +29,7 @@
 #include <asm/processor.h>
 #include <asm/i8259.h>
 #include <asm/irq.h>
-
-/*
- * INTC Registers
- */
-#define XINTC_ISR      0       /* Interrupt Status */
-#define XINTC_IPR      4       /* Interrupt Pending */
-#define XINTC_IER      8       /* Interrupt Enable */
-#define XINTC_IAR      12      /* Interrupt Acknowledge */
-#define XINTC_SIE      16      /* Set Interrupt Enable bits */
-#define XINTC_CIE      20      /* Clear Interrupt Enable bits */
-#define XINTC_IVR      24      /* Interrupt Vector */
-#define XINTC_MER      28      /* Master Enable */
-
-static struct irq_domain *master_irqhost;
-
-#define XILINX_INTC_MAXIRQS    (32)
-
-/* The following table allows the interrupt type, edge or level,
- * to be cached after being read from the device tree until the interrupt
- * is mapped
- */
-static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS];
-
-/* Map the interrupt type from the device tree to the interrupt types
- * used by the interrupt subsystem
- */
-static unsigned char xilinx_intc_map_senses[] = {
-       IRQ_TYPE_EDGE_RISING,
-       IRQ_TYPE_EDGE_FALLING,
-       IRQ_TYPE_LEVEL_HIGH,
-       IRQ_TYPE_LEVEL_LOW,
-};
-
-/*
- * The interrupt controller is setup such that it doesn't work well with
- * the level interrupt handler in the kernel because the handler acks the
- * interrupt before calling the application interrupt handler. To deal with
- * that, we use 2 different irq chips so that different functions can be
- * used for level and edge type interrupts.
- *
- * IRQ Chip common (across level and edge) operations
- */
-static void xilinx_intc_mask(struct irq_data *d)
-{
-       int irq = irqd_to_hwirq(d);
-       void * regs = irq_data_get_irq_chip_data(d);
-       pr_debug("mask: %d\n", irq);
-       out_be32(regs + XINTC_CIE, 1 << irq);
-}
-
-static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
-{
-       return 0;
-}
-
-/*
- * IRQ Chip level operations
- */
-static void xilinx_intc_level_unmask(struct irq_data *d)
-{
-       int irq = irqd_to_hwirq(d);
-       void * regs = irq_data_get_irq_chip_data(d);
-       pr_debug("unmask: %d\n", irq);
-       out_be32(regs + XINTC_SIE, 1 << irq);
-
-       /* ack level irqs because they can't be acked during
-        * ack function since the handle_level_irq function
-        * acks the irq before calling the inerrupt handler
-        */
-       out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_level_irqchip = {
-       .name = "Xilinx Level INTC",
-       .irq_mask = xilinx_intc_mask,
-       .irq_mask_ack = xilinx_intc_mask,
-       .irq_unmask = xilinx_intc_level_unmask,
-       .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Chip edge operations
- */
-static void xilinx_intc_edge_unmask(struct irq_data *d)
-{
-       int irq = irqd_to_hwirq(d);
-       void *regs = irq_data_get_irq_chip_data(d);
-       pr_debug("unmask: %d\n", irq);
-       out_be32(regs + XINTC_SIE, 1 << irq);
-}
-
-static void xilinx_intc_edge_ack(struct irq_data *d)
-{
-       int irq = irqd_to_hwirq(d);
-       void * regs = irq_data_get_irq_chip_data(d);
-       pr_debug("ack: %d\n", irq);
-       out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_edge_irqchip = {
-       .name = "Xilinx Edge INTC",
-       .irq_mask = xilinx_intc_mask,
-       .irq_unmask = xilinx_intc_edge_unmask,
-       .irq_ack = xilinx_intc_edge_ack,
-       .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Host operations
- */
-
-/**
- * xilinx_intc_xlate - translate virq# from device tree interrupts property
- */
-static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
-                               const u32 *intspec, unsigned int intsize,
-                               irq_hw_number_t *out_hwirq,
-                               unsigned int *out_flags)
-{
-       if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS))
-               return -EINVAL;
-
-       /* keep a copy of the interrupt type til the interrupt is mapped
-        */
-       xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]];
-
-       /* Xilinx uses 2 interrupt entries, the 1st being the h/w
-        * interrupt number, the 2nd being the interrupt type, edge or level
-        */
-       *out_hwirq = intspec[0];
-       *out_flags = xilinx_intc_map_senses[intspec[1]];
-
-       return 0;
-}
-
-static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
-                          irq_hw_number_t irq)
-{
-       irq_set_chip_data(virq, h->host_data);
-
-       if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
-           xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
-               irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
-                                        handle_level_irq);
-       } else {
-               irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
-                                        handle_edge_irq);
-       }
-       return 0;
-}
-
-static const struct irq_domain_ops xilinx_intc_ops = {
-       .map = xilinx_intc_map,
-       .xlate = xilinx_intc_xlate,
-};
-
-struct irq_domain * __init
-xilinx_intc_init(struct device_node *np)
-{
-       struct irq_domain * irq;
-       void * regs;
-
-       /* Find and map the intc registers */
-       regs = of_iomap(np, 0);
-       if (!regs) {
-               pr_err("xilinx_intc: could not map registers\n");
-               return NULL;
-       }
-
-       /* Setup interrupt controller */
-       out_be32(regs + XINTC_IER, 0); /* disable all irqs */
-       out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
-       out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
-
-       /* Allocate and initialize an irq_domain structure. */
-       irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
-                                   regs);
-       if (!irq)
-               panic(__FILE__ ": Cannot allocate IRQ host\n");
-
-       return irq;
-}
-
-int xilinx_intc_get_irq(void)
-{
-       void * regs = master_irqhost->host_data;
-       pr_debug("get_irq:\n");
-       return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
-}
+#include <linux/irqchip.h>

 #if defined(CONFIG_PPC_I8259)
 /*
@@ -265,31 +78,11 @@ static void __init xilinx_i8259_setup_cascade(void)
 static inline void xilinx_i8259_setup_cascade(void) { return; }
 #endif /* defined(CONFIG_PPC_I8259) */

-static const struct of_device_id xilinx_intc_match[] __initconst = {
-       { .compatible = "xlnx,opb-intc-1.00.c", },
-       { .compatible = "xlnx,xps-intc-1.00.a", },
-       {}
-};
-
 /*
  * Initialize master Xilinx interrupt controller
  */
 void __init xilinx_intc_init_tree(void)
 {
-       struct device_node *np;
-
-       /* find top level interrupt controller */
-       for_each_matching_node(np, xilinx_intc_match) {
-               if (!of_get_property(np, "interrupts", NULL))
-                       break;
-       }
-       BUG_ON(!np);
-
-       master_irqhost = xilinx_intc_init(np);
-       BUG_ON(!master_irqhost);
-
-       irq_set_default_host(master_irqhost);
-       of_node_put(np);
-
+       irqchip_init();
        xilinx_i8259_setup_cascade();
 }
@@ -211,6 +211,10 @@ config XTENSA_MX
        bool
        select IRQ_DOMAIN

+config XILINX_INTC
+       bool
+       select IRQ_DOMAIN
+
 config IRQ_CROSSBAR
        bool
        help
@@ -52,6 +52,7 @@ obj-$(CONFIG_TB10X_IRQC)               += irq-tb10x.o
 obj-$(CONFIG_TS4800_IRQ)               += irq-ts4800.o
 obj-$(CONFIG_XTENSA)                   += irq-xtensa-pic.o
 obj-$(CONFIG_XTENSA_MX)                        += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC)              += irq-xilinx-intc.o
 obj-$(CONFIG_IRQ_CROSSBAR)             += irq-crossbar.o
 obj-$(CONFIG_SOC_VF610)                        += irq-vf610-mscm-ir.o
 obj-$(CONFIG_BCM6345_L1_IRQ)           += irq-bcm6345-l1.o
@@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
        return 0;
 }

+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+       struct cpumask *mask = irq_data_get_affinity_mask(d);
+       int cpu = smp_processor_id();
+       cpumask_t new_affinity;
+
+       /* This CPU was not on the affinity mask */
+       if (!cpumask_test_cpu(cpu, mask))
+               return;
+
+       if (cpumask_weight(mask) > 1) {
+               /*
+                * Multiple CPU affinity, remove this CPU from the affinity
+                * mask
+                */
+               cpumask_copy(&new_affinity, mask);
+               cpumask_clear_cpu(cpu, &new_affinity);
+       } else {
+               /* Only CPU, put on the lowest online CPU */
+               cpumask_clear(&new_affinity);
+               cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+       }
+       irq_set_affinity_locked(d, &new_affinity, false);
+}
+
 static int __init bcm7038_l1_init_one(struct device_node *dn,
                                      unsigned int idx,
                                      struct bcm7038_l1_chip *intc)
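The offline path above makes a simple decision: drop the dying CPU from the mask, and if it was the only target, retarget to the lowest online CPU. A standalone simulation of that decision (plain C, with a bitmask standing in for cpumask_t; purely illustrative):

```c
#include <stdio.h>

/* Each bit of 'affinity' and 'online' represents one CPU. */
static unsigned int migrate_off(unsigned int affinity, int cpu,
                                unsigned int online)
{
        if (!(affinity & (1u << cpu)))
                return affinity;                /* CPU not in the mask: nothing to do */

        affinity &= ~(1u << cpu);               /* drop the dying CPU */
        if (!affinity)                          /* it was the only target ...   */
                affinity = online & -online;    /* ... use the lowest online CPU */
        return affinity;
}

int main(void)
{
        /* IRQ bound to CPUs {1,2}; CPU1 goes down -> mask becomes {2} */
        printf("0x%x\n", migrate_off(0x6, 1, 0xd));
        /* IRQ bound only to CPU1; CPU1 goes down -> lowest online CPU0 */
        printf("0x%x\n", migrate_off(0x2, 1, 0xd));
        return 0;
}
```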
@@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
        .irq_mask               = bcm7038_l1_mask,
        .irq_unmask             = bcm7038_l1_unmask,
        .irq_set_affinity       = bcm7038_l1_set_affinity,
+       .irq_cpu_offline        = bcm7038_l1_cpu_offline,
 };

 static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
@@ -37,7 +37,6 @@
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>

-#include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>

@@ -196,7 +195,7 @@ typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,

 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
 {
-       cmd->raw_cmd[0] &= ~0xffUL;
+       cmd->raw_cmd[0] &= ~0xffULL;
        cmd->raw_cmd[0] |= cmd_nr;
 }

@@ -208,43 +207,43 @@ static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)

 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
 {
-       cmd->raw_cmd[1] &= ~0xffffffffUL;
+       cmd->raw_cmd[1] &= ~0xffffffffULL;
        cmd->raw_cmd[1] |= id;
 }

 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
 {
-       cmd->raw_cmd[1] &= 0xffffffffUL;
+       cmd->raw_cmd[1] &= 0xffffffffULL;
        cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
 }

 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
 {
-       cmd->raw_cmd[1] &= ~0x1fUL;
+       cmd->raw_cmd[1] &= ~0x1fULL;
        cmd->raw_cmd[1] |= size & 0x1f;
 }

 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
 {
-       cmd->raw_cmd[2] &= ~0xffffffffffffUL;
-       cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+       cmd->raw_cmd[2] &= ~0xffffffffffffULL;
+       cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
 }

 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
 {
-       cmd->raw_cmd[2] &= ~(1UL << 63);
+       cmd->raw_cmd[2] &= ~(1ULL << 63);
        cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
 }

 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
 {
-       cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
-       cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+       cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
+       cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
 }

 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
 {
-       cmd->raw_cmd[2] &= ~0xffffUL;
+       cmd->raw_cmd[2] &= ~0xffffULL;
        cmd->raw_cmd[2] |= col;
 }

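These UL→ULL changes matter once the driver can be built for AArch32, where unsigned long is 32 bits wide: a constant such as ~0xffffffffUL collapses to 0 before it is widened, wiping the whole 64-bit command word instead of only its low half. A small standalone demonstration (the uint32_t cast mimics a 32-bit unsigned long, so this runs the same on any host):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t raw = 0xffffffffffffffffULL;

        /* What a 32-bit unsigned long does to the mask: ~0xffffffff == 0 */
        uint32_t ul_mask  = (uint32_t)~0xffffffffUL;
        /* The ULL constant keeps the high word regardless of long's width */
        uint64_t ull_mask = ~0xffffffffULL;     /* 0xffffffff00000000 */

        printf("UL  mask clears everything: %016llx\n",
               (unsigned long long)(raw & ul_mask));
        printf("ULL mask keeps high word:   %016llx\n",
               (unsigned long long)(raw & ull_mask));
        return 0;
}
```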
@@ -433,7 +432,7 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
         * the ITS.
         */
        if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
-               __flush_dcache_area(cmd, sizeof(*cmd));
+               gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
        else
                dsb(ishst);
 }
@@ -602,7 +601,7 @@ static void lpi_set_config(struct irq_data *d, bool enable)
         * Humpf...
         */
        if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
-               __flush_dcache_area(cfg, sizeof(*cfg));
+               gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
        else
                dsb(ishst);
        its_send_inv(its_dev, id);
@@ -657,8 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
        its = its_dev->its;
        addr = its->phys_base + GITS_TRANSLATER;

-       msg->address_lo         = addr & ((1UL << 32) - 1);
-       msg->address_hi         = addr >> 32;
+       msg->address_lo         = lower_32_bits(addr);
+       msg->address_hi         = upper_32_bits(addr);
        msg->data               = its_get_event_id(d);

        iommu_dma_map_msi_msg(d->irq, msg);
@@ -817,7 +816,7 @@ static int __init its_alloc_lpi_tables(void)
                        LPI_PROPBASE_SZ);

        /* Make sure the GIC will observe the written configuration */
-       __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+       gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

        return 0;
 }
@@ -836,7 +835,7 @@ static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
 {
        u32 idx = baser - its->tables;

-       return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+       return gits_read_baser(its->base + GITS_BASER + (idx << 3));
 }

 static void its_write_baser(struct its_node *its, struct its_baser *baser,
@@ -844,7 +843,7 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
 {
        u32 idx = baser - its->tables;

-       writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+       gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
        baser->val = its_read_baser(its, baser);
 }

@@ -910,7 +909,7 @@ retry_baser:
                shr = tmp & GITS_BASER_SHAREABILITY_MASK;
                if (!shr) {
                        cache = GITS_BASER_nC;
-                       __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+                       gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
                }
                goto retry_baser;
        }
@@ -935,9 +934,9 @@ retry_baser:
        }

        if (val != tmp) {
-               pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+               pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
                       &its->phys_base, its_base_type_string[type],
-                      (unsigned long) val, (unsigned long) tmp);
+                      val, tmp);
                free_pages((unsigned long)base, order);
                return -ENXIO;
        }
@@ -948,7 +947,7 @@ retry_baser:
        tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

        pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
-               &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+               &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
                its_base_type_string[type],
                (unsigned long)virt_to_phys(base),
                indirect ? "indirect" : "flat", (int)esz,
@@ -983,7 +982,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
                 * which is reported by ITS hardware times lvl1 table
                 * entry size.
                 */
-               ids -= ilog2(psz / esz);
+               ids -= ilog2(psz / (int)esz);
                esz = GITS_LVL1_ENTRY_SIZE;
        }
 }
@@ -998,7 +997,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
        new_order = max_t(u32, get_order(esz << ids), new_order);
        if (new_order >= MAX_ORDER) {
                new_order = MAX_ORDER - 1;
-               ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+               ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
                pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
                        &its->phys_base, its->device_ids, ids);
        }
@@ -1102,7 +1101,7 @@ static void its_cpu_init_lpis(void)
        }

        /* Make sure the GIC will observe the zero-ed page */
-       __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+       gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

        paddr = page_to_phys(pend_page);
        pr_info("CPU%d: using LPI pending table @%pa\n",
@@ -1126,8 +1125,8 @@ static void its_cpu_init_lpis(void)
               GICR_PROPBASER_WaWb |
               ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

-       writeq_relaxed(val, rbase + GICR_PROPBASER);
-       tmp = readq_relaxed(rbase + GICR_PROPBASER);
+       gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+       tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

        if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
                if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
@@ -1139,7 +1138,7 @@ static void its_cpu_init_lpis(void)
                val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
                         GICR_PROPBASER_CACHEABILITY_MASK);
                val |= GICR_PROPBASER_nC;
-               writeq_relaxed(val, rbase + GICR_PROPBASER);
+               gicr_write_propbaser(val, rbase + GICR_PROPBASER);
        }
        pr_info_once("GIC: using cache flushing for LPI property table\n");
        gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
@@ -1150,8 +1149,8 @@ static void its_cpu_init_lpis(void)
               GICR_PENDBASER_InnerShareable |
               GICR_PENDBASER_WaWb);

-       writeq_relaxed(val, rbase + GICR_PENDBASER);
-       tmp = readq_relaxed(rbase + GICR_PENDBASER);
+       gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+       tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

        if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
                /*
@@ -1161,7 +1160,7 @@ static void its_cpu_init_lpis(void)
                val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
                         GICR_PENDBASER_CACHEABILITY_MASK);
                val |= GICR_PENDBASER_nC;
-               writeq_relaxed(val, rbase + GICR_PENDBASER);
+               gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        }

        /* Enable LPIs */
@@ -1287,13 +1286,13 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)

                /* Flush Lvl2 table to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
-                       __flush_dcache_area(page_address(page), baser->psz);
+                       gic_flush_dcache_to_poc(page_address(page), baser->psz);

                table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

                /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
-                       __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+                       gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

                /* Ensure updated table contents are visible to ITS hardware */
                dsb(sy);
@@ -1340,7 +1339,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
                return NULL;
        }

-       __flush_dcache_area(itt, sz);
+       gic_flush_dcache_to_poc(itt, sz);

        dev->its = its;
        dev->itt = itt;
@@ -1717,8 +1716,8 @@ static int __init its_probe_one(struct resource *res,
                (ITS_CMD_QUEUE_SZ / SZ_4K - 1)  |
                GITS_CBASER_VALID);

-       writeq_relaxed(baser, its->base + GITS_CBASER);
-       tmp = readq_relaxed(its->base + GITS_CBASER);
+       gits_write_cbaser(baser, its->base + GITS_CBASER);
+       tmp = gits_read_cbaser(its->base + GITS_CBASER);

        if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
                if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
@@ -1730,13 +1729,13 @@ static int __init its_probe_one(struct resource *res,
                        baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
                                   GITS_CBASER_CACHEABILITY_MASK);
                        baser |= GITS_CBASER_nC;
-                       writeq_relaxed(baser, its->base + GITS_CBASER);
+                       gits_write_cbaser(baser, its->base + GITS_CBASER);
                }
                pr_info("ITS: using cache flushing for cmd queue\n");
                its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
        }

-       writeq_relaxed(0, its->base + GITS_CWRITER);
+       gits_write_cwriter(0, its->base + GITS_CWRITER);
        writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

        err = its_init_domain(handle, its);
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00                       /* Interrupt Status Register */
+#define IPR 0x04                       /* Interrupt Pending Register */
+#define IER 0x08                       /* Interrupt Enable Register */
+#define IAR 0x0c                       /* Interrupt Acknowledge Register */
+#define SIE 0x10                       /* Set Interrupt Enable bits */
+#define CIE 0x14                       /* Clear Interrupt Enable bits */
+#define IVR 0x18                       /* Interrupt Vector Register */
+#define MER 0x1c                       /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+       void __iomem *base;
+       struct irq_domain *root_domain;
+       u32 intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+       if (static_branch_unlikely(&xintc_is_be))
+               iowrite32be(data, xintc_irqc->base + reg);
+       else
+               iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+       if (static_branch_unlikely(&xintc_is_be))
+               return ioread32be(xintc_irqc->base + reg);
+       else
+               return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+       unsigned long mask = 1 << d->hwirq;
+
+       pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+       /* ack level irqs because they can't be acked during
+        * ack function since the handle_level_irq function
+        * acks the irq before calling the interrupt handler
+        */
+       if (irqd_is_level_type(d))
+               xintc_write(IAR, mask);
+
+       xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+       pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+       xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+       pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+       xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+       unsigned long mask = 1 << d->hwirq;
+
+       pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+       xintc_write(CIE, mask);
+       xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+       .name = "Xilinx INTC",
+       .irq_unmask = intc_enable_or_unmask,
+       .irq_mask = intc_disable_or_mask,
+       .irq_ack = intc_ack,
+       .irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+       unsigned int hwirq, irq = -1;
+
+       hwirq = xintc_read(IVR);
+       if (hwirq != -1U)
+               irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+       pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+       return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+       if (xintc_irqc->intr_mask & (1 << hw)) {
+               irq_set_chip_and_handler_name(irq, &intc_dev,
+                                               handle_edge_irq, "edge");
+               irq_clear_status_flags(irq, IRQ_LEVEL);
+       } else {
+               irq_set_chip_and_handler_name(irq, &intc_dev,
+                                               handle_level_irq, "level");
+               irq_set_status_flags(irq, IRQ_LEVEL);
+       }
+       return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+       .xlate = irq_domain_xlate_onetwocell,
+       .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       u32 pending;
+
+       chained_irq_enter(chip, desc);
+       do {
+               pending = xintc_get_irq();
+               if (pending == -1U)
+                       break;
+               generic_handle_irq(pending);
+       } while (true);
+       chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+                                            struct device_node *parent)
+{
+       u32 nr_irq;
+       int ret, irq;
+       struct xintc_irq_chip *irqc;
+
+       if (xintc_irqc) {
+               pr_err("irq-xilinx: Multiple instances aren't supported\n");
+               return -EINVAL;
+       }
+
+       irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+       if (!irqc)
+               return -ENOMEM;
+
+       xintc_irqc = irqc;
+
+       irqc->base = of_iomap(intc, 0);
+       BUG_ON(!irqc->base);
+
+       ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+       if (ret < 0) {
+               pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+               goto err_alloc;
+       }
+
+       ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+       if (ret < 0) {
+               pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+               irqc->intr_mask = 0;
+       }
+
+       if (irqc->intr_mask >> nr_irq)
+               pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+       pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
+               intc->full_name, nr_irq, irqc->intr_mask);
+
+       /*
+        * Disable all external interrupts until they are
+        * explicity requested.
+        */
+       xintc_write(IER, 0);
+
+       /* Acknowledge any pending interrupts just in case. */
+       xintc_write(IAR, 0xffffffff);
+
+       /* Turn on the Master Enable. */
+       xintc_write(MER, MER_HIE | MER_ME);
+       if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+               static_branch_enable(&xintc_is_be);
+               xintc_write(MER, MER_HIE | MER_ME);
+       }
+
+       irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+                                                 &xintc_irq_domain_ops, irqc);
+       if (!irqc->root_domain) {
+               pr_err("irq-xilinx: Unable to create IRQ domain\n");
+               goto err_alloc;
+       }
+
+       if (parent) {
+               irq = irq_of_parse_and_map(intc, 0);
+               if (irq) {
+                       irq_set_chained_handler_and_data(irq,
+                                                        xil_intc_irq_handler,
+                                                        irqc);
+               } else {
+                       pr_err("irq-xilinx: interrupts property not in DT\n");
+                       ret = -EINVAL;
+                       goto err_alloc;
+               }
+       } else {
+               irq_set_default_host(irqc->root_domain);
+       }
+
+       return 0;
+
+err_alloc:
+       xintc_irqc = NULL;
+       kfree(irqc);
+       return ret;
+
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
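The endianness probe in the new driver works because a write made with the wrong byte order lands outside the register's implemented bits and reads back as zero, at which point the driver flips the xintc_is_be static key and retries. A userspace sketch of that probe (a hypothetical model; that MER implements only its two low bits is an assumption inferred from the register definitions above):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t mer;                    /* fake MER register */
static int big_endian_bus = 1;          /* property of the "hardware" */

static uint32_t bswap32(uint32_t v) { return __builtin_bswap32(v); }

/* If the accessor's byte order disagrees with the bus, bytes arrive
 * swapped; the device then latches only its implemented bits. */
static void reg_write(uint32_t v, int swapped)
{
        if (big_endian_bus != swapped)
                v = bswap32(v);
        mer = v & 0x3;                  /* device implements only ME and HIE */
}

static uint32_t reg_read(int swapped)
{
        uint32_t v = mer;
        if (big_endian_bus != swapped)
                v = bswap32(v);
        return v;
}

int main(void)
{
        int swapped = 0;
        const uint32_t pattern = 0x3;   /* MER_HIE | MER_ME */

        reg_write(pattern, swapped);
        if ((reg_read(swapped) & pattern) != pattern) {
                swapped = 1;            /* static_branch_enable(&xintc_is_be) */
                reg_write(pattern, swapped);
        }
        printf("byte-swapped accessors: %s\n", swapped ? "yes" : "no");
        return 0;
}
```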
@@ -551,14 +551,14 @@ error_attrs:
 }

 static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
 {
        struct cpumask *masks = NULL;
        struct msi_desc *entry;
        u16 control;

-       if (affinity) {
-               masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+       if (affd) {
+               masks = irq_create_affinity_masks(nvec, affd);
                if (!masks)
                        pr_err("Unable to allocate affinity masks, ignoring\n");
        }
@@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+                              const struct irq_affinity *affd)
 {
        struct msi_desc *entry;
        int ret;
@@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)

        pci_msi_set_enable(dev, 0);     /* Disable MSI during set up */

-       entry = msi_setup_entry(dev, nvec, affinity);
+       entry = msi_setup_entry(dev, nvec, affd);
        if (!entry)
                return -ENOMEM;

@@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)

 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                              struct msix_entry *entries, int nvec,
-                             bool affinity)
+                             const struct irq_affinity *affd)
 {
        struct cpumask *curmsk, *masks = NULL;
        struct msi_desc *entry;
        int ret, i;

-       if (affinity) {
-               masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+       if (affd) {
+               masks = irq_create_affinity_masks(nvec, affd);
                if (!masks)
                        pr_err("Unable to allocate affinity masks, ignoring\n");
        }
@@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: Optional pointer to enable automatic affinity assignement
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
-                               int nvec, bool affinity)
+                               int nvec, const struct irq_affinity *affd)
 {
        int ret;
        u16 control;
@@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        if (!base)
                return -ENOMEM;

-       ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+       ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
                return ret;

@@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_msix_vec_count);

 static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
-                            int nvec, bool affinity)
+                            int nvec, const struct irq_affinity *affd)
 {
        int nr_entries;
        int i, j;
@@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
                dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
                return -EINVAL;
        }
-       return msix_capability_init(dev, entries, nvec, affinity);
+       return msix_capability_init(dev, entries, nvec, affd);
 }

 /**
@@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
 **/
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
 {
-       return __pci_enable_msix(dev, entries, nvec, false);
+       return __pci_enable_msix(dev, entries, nvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix);

@@ -1059,9 +1060,8 @@ int pci_msi_enabled(void)
 EXPORT_SYMBOL(pci_msi_enabled);

 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
-                                 unsigned int flags)
+                                 const struct irq_affinity *affd)
 {
-       bool affinity = flags & PCI_IRQ_AFFINITY;
        int nvec;
        int rc;

@@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
                nvec = maxvec;

        for (;;) {
-               if (affinity) {
-                       nvec = irq_calc_affinity_vectors(dev->irq_affinity,
-                                       nvec);
+               if (affd) {
+                       nvec = irq_calc_affinity_vectors(nvec, affd);
                        if (nvec < minvec)
                                return -ENOSPC;
                }

-               rc = msi_capability_init(dev, nvec, affinity);
+               rc = msi_capability_init(dev, nvec, affd);
                if (rc == 0)
                        return nvec;

@@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-       return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+       return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msi_range);

 static int __pci_enable_msix_range(struct pci_dev *dev,
-               struct msix_entry *entries, int minvec, int maxvec,
-               unsigned int flags)
+               struct msix_entry *entries, int minvec,
+               int maxvec, const struct irq_affinity *affd)
 {
-       bool affinity = flags & PCI_IRQ_AFFINITY;
        int rc, nvec = maxvec;

        if (maxvec < minvec)
                return -ERANGE;

        for (;;) {
-               if (affinity) {
-                       nvec = irq_calc_affinity_vectors(dev->irq_affinity,
-                                       nvec);
+               if (affd) {
+                       nvec = irq_calc_affinity_vectors(nvec, affd);
                        if (nvec < minvec)
                                return -ENOSPC;
                }

-               rc = __pci_enable_msix(dev, entries, nvec, affinity);
+               rc = __pci_enable_msix(dev, entries, nvec, affd);
                if (rc == 0)
                        return nvec;

@@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
                int minvec, int maxvec)
 {
-       return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+       return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
 }
 EXPORT_SYMBOL(pci_enable_msix_range);

 /**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
  * @dev:               PCI device to operate on
  * @min_vecs:          minimum number of vectors required (must be >= 1)
  * @max_vecs:          maximum (desired) number of vectors
  * @flags:             flags or quirks for the allocation
+ * @affd:              optional description of the affinity requirements
  *
  * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
  * vectors if available, and fall back to a single legacy vector
@@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
 * To get the Linux IRQ number used for a vector that can be passed to
 * request_irq() use the pci_irq_vector() helper.
 */
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
-               unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+                                  unsigned int max_vecs, unsigned int flags,
+                                  const struct irq_affinity *affd)
 {
+       static const struct irq_affinity msi_default_affd;
        int vecs = -ENOSPC;

+       if (flags & PCI_IRQ_AFFINITY) {
+               if (!affd)
+                       affd = &msi_default_affd;
+       } else {
+               if (WARN_ON(affd))
+                       affd = NULL;
+       }
+
        if (flags & PCI_IRQ_MSIX) {
                vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
-                               flags);
+                               affd);
                if (vecs > 0)
                        return vecs;
        }

        if (flags & PCI_IRQ_MSI) {
-               vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+               vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
                if (vecs > 0)
                        return vecs;
        }
@@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,

        return vecs;
 }
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);

 /**
 * pci_free_irq_vectors - free previously allocated IRQs for a device
@@ -232,6 +232,18 @@ struct irq_affinity_notify {
        void (*release)(struct kref *ref);
 };

+/**
+ * struct irq_affinity - Description for automatic irq affinity assignements
+ * @pre_vectors:       Don't apply affinity to @pre_vectors at beginning of
+ *                     the MSI(-X) vector space
+ * @post_vectors:      Don't apply affinity to @post_vectors at end of
+ *                     the MSI(-X) vector space
+ */
+struct irq_affinity {
+       int     pre_vectors;
+       int     post_vectors;
+};
+
 #if defined(CONFIG_SMP)

 extern cpumask_var_t irq_default_affinity;
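For callers, the point of struct irq_affinity is to exclude "reserved" vectors from automatic spreading. A hypothetical driver fragment using the new entry point (names like nr_io_queues are illustrative, not from this commit):

```c
#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_setup_irqs(struct pci_dev *pdev, unsigned int nr_io_queues)
{
        /* Keep vector 0 (e.g. an admin queue) out of the automatic
         * spreading; the remaining vectors are distributed across the
         * online CPUs by the genirq affinity code. */
        static const struct irq_affinity affd = { .pre_vectors = 1 };

        return pci_alloc_irq_vectors_affinity(pdev, 2, nr_io_queues + 1,
                                              PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                              &affd);
}
```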
@@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);

 #else /* CONFIG_SMP */

@@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }

 static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
 {
        return NULL;
 }

 static inline int
-irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
        return maxvec;
 }
@@ -239,7 +239,7 @@
 #define GITS_TYPER_PTA			(1UL << 19)
 #define GITS_TYPER_HWCOLLCNT_SHIFT	24
 
-#define GITS_CBASER_VALID		(1UL << 63)
+#define GITS_CBASER_VALID		(1ULL << 63)
 #define GITS_CBASER_SHAREABILITY_SHIFT	(10)
 #define GITS_CBASER_INNER_CACHEABILITY_SHIFT	(59)
 #define GITS_CBASER_OUTER_CACHEABILITY_SHIFT	(53)

@@ -265,7 +265,7 @@
 
 #define GITS_BASER_NR_REGS		8
 
-#define GITS_BASER_VALID		(1UL << 63)
+#define GITS_BASER_VALID		(1ULL << 63)
 #define GITS_BASER_INDIRECT		(1ULL << 62)
 
 #define GITS_BASER_INNER_CACHEABILITY_SHIFT	(59)
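The 1UL -> 1ULL changes are what make these masks safe on AArch32: unsigned long is only 32 bits there, so shifting it by 62 or 63 is undefined behaviour. A small host-side sketch (plain C, not kernel code) of the type widths involved:

	#include <stdio.h>

	int main(void)
	{
		/* On an ILP32 target such as 32-bit ARM, sizeof(1UL) is 4,
		 * so (1UL << 63) would shift past the type; 1ULL is 64 bits
		 * wide on every target. */
		printf("sizeof(1UL)  = %zu\n", sizeof(1UL));
		printf("sizeof(1ULL) = %zu\n", sizeof(1ULL));
		return 0;
	}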
@@ -244,6 +244,7 @@ struct pci_cap_saved_state {
 	struct pci_cap_saved_data cap;
 };
 
+struct irq_affinity;
 struct pcie_link_state;
 struct pci_vpd;
 struct pci_sriov;

@@ -332,7 +333,6 @@ struct pci_dev {
 	 * directly, use the values stored here. They might be different!
 	 */
 	unsigned int	irq;
-	struct cpumask	*irq_affinity;
 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
 
 	bool		match_driver;	/* Skip attaching driver */
@@ -1310,8 +1310,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
 		return rc;
 	return 0;
 }
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
-		unsigned int max_vecs, unsigned int flags);
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+				   unsigned int max_vecs, unsigned int flags,
+				   const struct irq_affinity *affd);
+
 void pci_free_irq_vectors(struct pci_dev *dev);
 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);

@@ -1339,14 +1341,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
 static inline int pci_enable_msix_exact(struct pci_dev *dev,
 		struct msix_entry *entries, int nvec)
 { return -ENOSYS; }
-static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
-		unsigned int min_vecs, unsigned int max_vecs,
-		unsigned int flags)
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+			       unsigned int max_vecs, unsigned int flags,
+			       const struct irq_affinity *aff_desc)
 {
 	if (min_vecs > 1)
 		return -EINVAL;
 	return 1;
 }
 
 static inline void pci_free_irq_vectors(struct pci_dev *dev)
 {
 }

@@ -1364,6 +1369,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
 }
 #endif
 
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+		      unsigned int max_vecs, unsigned int flags)
+{
+	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+					      NULL);
+}
+
 #ifdef CONFIG_PCIEPORTBUS
 extern bool pcie_ports_disabled;
 extern bool pcie_ports_auto;
@@ -51,16 +51,17 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity:		The affinity mask to spread. If NULL cpu_online_mask
- *			is used
- * @nvecs:		The number of vectors
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
  *
  * Returns the masks pointer or NULL if allocation failed.
  */
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
-					  int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
 	cpumask_var_t nmsk;

@@ -68,46 +69,47 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
 
-	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		goto out;
 
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
+
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
 
 	/*
	 * If the number of nodes in the mask is less than or equal to the
	 * number of vectors we just spread the vectors across the nodes.
	 */
-	if (nvec <= nodes) {
+	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec, cpumask_of_node(n));
-			if (++curvec == nvec)
+			if (++curvec == last_affv)
 				break;
 		}
-		goto outonl;
+		goto done;
 	}
 
 	/* Spread the vectors per node */
-	vecs_per_node = nvec / nodes;
+	vecs_per_node = affv / nodes;
 	/* Account for rounding errors */
-	extra_vecs = nvec - (nodes * vecs_per_node);
+	extra_vecs = affv - (nodes * vecs_per_node);
 
 	for_each_node_mask(n, nodemsk) {
 		int ncpus, v, vecs_to_assign = vecs_per_node;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, affinity, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
 
-		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+		for (v = 0; curvec < last_affv && v < vecs_to_assign;
+		     curvec++, v++) {
 			cpus_per_vec = ncpus / vecs_to_assign;
 
 			/* Account for extra vectors to compensate rounding errors */
@@ -119,36 +121,36 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= nvec)
+		if (curvec >= last_affv)
 			break;
 	}
 
-outonl:
+done:
 	put_online_cpus();
+
+	/* Fill out vectors at the end that don't need affinity */
+	for (; curvec < nvecs; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
 }
 
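The reserved-vector arithmetic above is easiest to follow with concrete numbers (illustrative, not from the commit):

	/*
	 * nvecs = 8, affd = { .pre_vectors = 1, .post_vectors = 1 }:
	 *   affv      = 8 - 1 - 1 = 6    vectors that actually get spread
	 *   last_affv = 6 + 1     = 7    index of the first post vector
	 * With two online NUMA nodes: vecs_per_node = 6 / 2 = 3 and
	 * extra_vecs = 0, so vectors 1-3 land on node 0's CPUs, vectors
	 * 4-6 on node 1's, and vectors 0 and 7 keep irq_default_affinity.
	 */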
 /**
- * irq_calc_affinity_vectors - Calculate to optimal number of vectors for a given affinity mask
- * @affinity:		The affinity mask to spread. If NULL cpu_online_mask
- *			is used
- * @maxvec:		The maximum number of vectors available
+ * irq_calc_affinity_vectors - Calculate the optimal number of vectors
+ * @maxvec:	The maximum number of vectors available
+ * @affd:	Description of the affinity requirements
  */
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
-	int cpus, ret;
+	int resv = affd->pre_vectors + affd->post_vectors;
+	int vecs = maxvec - resv;
+	int cpus;
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	/* If the supplied affinity mask is NULL, use cpu online mask */
-	if (!affinity)
-		affinity = cpu_online_mask;
-
-	cpus = cpumask_weight(affinity);
-	ret = (cpus < maxvec) ? cpus : maxvec;
-
+	cpus = cpumask_weight(cpu_online_mask);
 	put_online_cpus();
-	return ret;
+
+	return min(cpus, vecs) + resv;
 }
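Again with illustrative numbers:

	/*
	 * maxvec = 32, affd = { .pre_vectors = 1, .post_vectors = 0 }:
	 *   resv = 1, vecs = 32 - 1 = 31
	 * With 16 CPUs online the result is min(16, 31) + 1 = 17: one
	 * vector per online CPU plus the reserved, non-spread vector.
	 */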
@@ -14,9 +14,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
-
-/* Temparory solution for building, will be removed later */
-#include <linux/pci.h>
+#include <linux/slab.h>
 
 /**
  * alloc_msi_entry - Allocate and initialize msi_entry