/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2001 Ralf Baechle
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 *
 * This file defines the irq handlers for MIPS CPU interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

/*
 * Almost all MIPS CPUs define 8 interrupt sources. They are typically
 * level triggered (i.e., they cannot be cleared from the CPU; they must be
 * cleared from the device).
 *
 * The first two are software interrupts (i.e. not exposed as pins) which
 * may be used for IPIs in multi-threaded single-core systems.
 *
 * The last one is usually the CPU timer interrupt if the counter register
 * is present, or for old CPUs with an external FPU by convention it's the
 * FPU exception interrupt.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>
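
/*
 * irq_domain maps the eight CPU interrupt sources (hw IRQs 0-7) to Linux
 * virtual interrupt numbers.  ipi_domain is stacked on top of it and covers
 * the two software interrupts when they are used as IPIs
 * (CONFIG_GENERIC_IRQ_IPI).
 */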
static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;
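
/*
 * Masking or unmasking a CPU interrupt simply means clearing or setting the
 * corresponding interrupt-enable bit in the CP0 Status register; IE_SW0 is
 * the enable bit for hw IRQ 0, so shifting it by d->hwirq selects the right
 * line.  The hazard barriers ensure the Status update has taken effect
 * before execution continues.
 */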
static inline void unmask_mips_irq(struct irq_data *d)
{
	set_c0_status(IE_SW0 << d->hwirq);
	irq_enable_hazard();
}

static inline void mask_mips_irq(struct irq_data *d)
{
	clear_c0_status(IE_SW0 << d->hwirq);
	irq_disable_hazard();
}

static struct irq_chip mips_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_ack	= mask_mips_irq,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mask_mips_irq,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
};

/*
 * Basically the same as above but taking care of all the MT stuff
 */
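
/*
 * On an MT core the startup handler clears any request already latched in
 * the Cause register before unmasking the line, with dvpe()/evpe()
 * bracketing the update so it is not disturbed by the other virtual
 * processing elements.
 */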
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	unmask_mips_irq(d);
	return 0;
}

/*
 * While we ack the interrupt, interrupts are disabled and thus we don't
 * need to deal with concurrency issues.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	mask_mips_irq(d);
}

#ifdef CONFIG_GENERIC_IRQ_IPI
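
/*
 * Send an IPI to another VPE on the same core: target the VPE's TC with
 * settc() and set the appropriate software-interrupt bit in its Cause
 * register, which raises the corresponding CPU interrupt there.
 */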
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	int vpflags;

	local_irq_save(flags);

	/* We can only send IPIs to VPEs within the local core */
	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

	vpflags = dvpe();
	settc(cpu_vpe_id(&cpu_data[cpu]));
	write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
	evpe(vpflags);

	local_irq_restore(flags);
}

#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct irq_chip mips_mt_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_startup	= mips_mt_cpu_irq_startup,
	.irq_ack	= mips_mt_cpu_irq_ack,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mips_mt_cpu_irq_ack,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
	.ipi_send_single = mips_mt_send_ipi,
#endif
};
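
/*
 * Default dispatch routine, used unless a platform provides its own
 * plat_irq_dispatch().  Pending sources are the Cause IP bits that are also
 * enabled in Status; they are serviced from the highest-numbered source
 * down, and the first two (software) interrupts are looked up in the IPI
 * domain when CONFIG_GENERIC_IRQ_IPI is enabled.
 */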
asmlinkage void __weak plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
	unsigned int virq;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	pending >>= CAUSEB_IP;
	while (pending) {
		irq = fls(pending) - 1;
		if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
			virq = irq_linear_revmap(ipi_domain, irq);
		else
			virq = irq_linear_revmap(irq_domain, irq);
		do_IRQ(virq);
		pending &= ~BIT(irq);
	}
}
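
/*
 * Domain .map callback: software interrupts (hw IRQs 0 and 1) get the
 * MT-aware chip when the CPU implements the MT ASE so they can be used as
 * IPIs, everything else gets the plain chip.  With vectored interrupts
 * available, the dispatch routine is also installed as the vector handler,
 * and all lines are handled as per-CPU interrupts.
 */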
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	struct irq_chip *chip;

	if (hw < 2 && cpu_has_mipsmt) {
		/* Software interrupts are used for MT/CMT IPI */
		chip = &mips_mt_cpu_irq_controller;
	} else {
		chip = &mips_cpu_irq_controller;
	}

	if (cpu_has_vint)
		set_vi_handler(hw, plat_irq_dispatch);

	irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
	.map = mips_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

struct cpu_ipi_domain_state {
	DECLARE_BITMAP(allocated, 2);
};
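
/*
 * Allocate IPI virqs from the pool of two software interrupts; the bitmap
 * in the domain state tracks which of the two hw IRQs are already in use.
 * Each allocated interrupt is wired to the MT-aware chip above.
 */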
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct cpu_ipi_domain_state *state = domain->host_data;
	unsigned int i, hwirq;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		hwirq = find_first_zero_bit(state->allocated, 2);
		if (hwirq == 2)
			return -EBUSY;
		bitmap_set(state->allocated, hwirq, 1);

		ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);
		if (ret)
			return ret;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
		if (ret)
			return ret;
	}

	return 0;
}
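
/*
 * Match only requests explicitly targeting the IPI bus so that ordinary
 * device interrupt lookups don't end up in this domain.
 */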
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
	.alloc = mips_cpu_ipi_alloc,
	.match = mips_cpu_ipi_match,
};
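
/*
 * Register a hierarchical IPI domain with two hwirqs on top of the CPU
 * interrupt domain and tag it with the IPI bus token so that IPI
 * allocations are routed here.
 */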
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
	struct cpu_ipi_domain_state *ipi_domain_state;

	ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
	ipi_domain = irq_domain_add_hierarchy(irq_domain,
					      IRQ_DOMAIN_FLAG_IPI_SINGLE,
					      2, of_node,
					      &mips_cpu_ipi_chip_ops,
					      ipi_domain_state);
	if (!ipi_domain)
		panic("Failed to add MIPS CPU IPI domain");
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}

#endif /* !CONFIG_GENERIC_IRQ_IPI */
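
/*
 * Common init: mask everything at the CPU, register a legacy domain for the
 * eight CPU interrupt lines starting at MIPS_CPU_IRQ_BASE, and, on MIPS MT
 * systems, register the software-interrupt IPI domain on top.
 */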
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
					   &mips_cpu_intc_irq_domain_ops,
					   NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for MIPS CPU");

	/*
	 * Only proceed to register the software interrupt IPI implementation
	 * for CPUs which implement the MIPS MT (multi-threading) ASE.
	 */
	if (cpu_has_mipsmt)
		mips_cpu_register_ipi_domain(of_node);
}

void __init mips_cpu_irq_init(void)
{
	__mips_cpu_irq_init(NULL);
}

int __init mips_cpu_irq_of_init(struct device_node *of_node,
				struct device_node *parent)
{
	__mips_cpu_irq_init(of_node);
	return 0;
}
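
/*
 * Bound to the device tree via the "mti,cpu-interrupt-controller"
 * compatible below.  A node along these lines (an illustrative sketch, not
 * taken from this file; name and label are arbitrary) is what the one-cell
 * xlate above expects:
 *
 *	cpu_intc: interrupt-controller {
 *		compatible = "mti,cpu-interrupt-controller";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */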
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);