irqchip/gic-v4.1: Plumb get/set_irqchip_state SGI callbacks
To implement the get/set_irqchip_state callbacks (limited to the PENDING state), we have to use a particular set of hacks:

- Reading the pending state is done by using a pair of new redistributor registers (GICR_VSGIR, GICR_VSGIPENDR), which allow the state of the 16 interrupts to be retrieved.
- Setting the pending state is done by generating it as we'd otherwise do for a guest (writing to GITS_SGIR).
- Clearing the pending state is done by emitting a VSGI command with the "clear" bit set.

This requires some interesting locking though:

- When talking to the redistributor, we must make sure that the VPE affinity doesn't change, hence taking the VPE lock.
- At the same time, we must ensure that nobody accesses the same redistributor's GICR_VSGIR registers for a different VPE, which would corrupt the reading of the pending bits. We thus take the per-RD spinlock.

Much fun.

Signed-off-by: Marc Zyngier <maz@kernel.org> Reviewed-by: Zenghui Yu <yuzenghui@huawei.com> Link: https://lore.kernel.org/r/20200304203330.4967-12-maz@kernel.org
This commit is contained in:
parent
b4e8d644ec
commit
7017ff0ee1
|
@ -3972,11 +3972,88 @@ static int its_sgi_set_affinity(struct irq_data *d,
|
||||||
return IRQ_SET_MASK_OK;
|
return IRQ_SET_MASK_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int its_sgi_set_irqchip_state(struct irq_data *d,
|
||||||
|
enum irqchip_irq_state which,
|
||||||
|
bool state)
|
||||||
|
{
|
||||||
|
if (which != IRQCHIP_STATE_PENDING)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (state) {
|
||||||
|
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
|
||||||
|
struct its_node *its = find_4_1_its();
|
||||||
|
u64 val;
|
||||||
|
|
||||||
|
val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
|
||||||
|
val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
|
||||||
|
writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
|
||||||
|
} else {
|
||||||
|
its_configure_sgi(d, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Read back the pending state of a vSGI (IRQCHIP_STATE_PENDING only).
 *
 * The redistributor is asked to sample the 16 SGI pending bits of the
 * vPE by writing the vPE ID to GICR_VSGIR, then polling GICR_VSGIPENDR
 * until its BUSY bit clears. Returns -EINVAL for any state other than
 * PENDING, -ENXIO if the redistributor never reports completion.
 */
static int its_sgi_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	void __iomem *base;
	unsigned long flags;
	u32 count = 1000000;	/* 1s! */
	u32 status;
	int cpu;

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	/*
	 * Locking galore! We can race against two different events:
	 *
	 * - Concurrent vPE affinity change: we must make sure it cannot
	 *   happen, or we'll talk to the wrong redistributor. This is
	 *   identical to what happens with vLPIs.
	 *
	 * - Concurrent VSGIPENDR access: As it involves accessing two
	 *   MMIO registers, this must be made atomic one way or another.
	 */
	cpu = vpe_to_cpuid_lock(vpe, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	/* The vSGI registers sit 128kB past this RD's base */
	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
	do {
		status = readl_relaxed(base + GICR_VSGIPENDR);
		if (!(status & GICR_VSGIPENDR_BUSY))
			goto out;

		count--;
		if (!count) {
			pr_err_ratelimited("Unable to get SGI status\n");
			goto out;
		}
		cpu_relax();
		udelay(1);
	} while (count);

out:
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	vpe_to_cpuid_unlock(vpe, flags);

	/* count only hits zero on timeout; status is stale in that case */
	if (!count)
		return -ENXIO;

	*val = !!(status & (1 << d->hwirq));

	return 0;
}
|
||||||
|
|
||||||
/* irq_chip driving the 16 SGIs of a GICv4.1 vPE */
static struct irq_chip its_sgi_irq_chip = {
	.name			= "GICv4.1-sgi",
	.irq_mask		= its_sgi_mask_irq,
	.irq_unmask		= its_sgi_unmask_irq,
	.irq_set_affinity	= its_sgi_set_affinity,
	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
};
|
||||||
|
|
||||||
static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
|
static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
|
||||||
|
|
|
@ -345,6 +345,15 @@
|
||||||
#define GICR_VPENDBASER_4_1_VGRP1EN	(1ULL << 58)
#define GICR_VPENDBASER_4_1_VPEID	GENMASK_ULL(15, 0)

/* GICv4.1 vSGI pending-state query registers (redistributor) */
#define GICR_VSGIR			0x0080
#define GICR_VSGIR_VPEID		GENMASK(15, 0)
#define GICR_VSGIPENDR			0x0088
#define GICR_VSGIPENDR_BUSY		(1U << 31)
#define GICR_VSGIPENDR_PENDING		GENMASK(15, 0)

/*
 * ITS registers, offsets from ITS_base
 */
|
@ -368,6 +377,11 @@
|
||||||
|
|
||||||
#define GITS_TRANSLATER			0x10040

/* GICv4.1 vSGI generation register and its fields */
#define GITS_SGIR			0x20020
#define GITS_SGIR_VPEID			GENMASK_ULL(47, 32)
#define GITS_SGIR_VINTID		GENMASK_ULL(3, 0)

#define GITS_CTLR_ENABLE		(1U << 0)
#define GITS_CTLR_ImDe			(1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT	4
||||||
|
|
Loading…
Reference in New Issue