Merge branch 'kvm-arm64/gic-v4.1' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit cc98702c17
@@ -39,6 +39,7 @@
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -89,7 +89,8 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
-	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+	    vcpu->kvm->arch.vgic.nassgireq)
 		vcpu->arch.hcr_el2 &= ~HCR_TWI;
 	else
 		vcpu->arch.hcr_el2 |= HCR_TWI;
 
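
A brief aside on the hunk above (illustrative, not from the patch): KVM traps guest WFI so it can schedule other work, but once the vPE has vLPIs mapped or runs with HW-backed SGIs (nassgireq), hardware can inject interrupts on its own and the trap is dropped. A standalone sketch of that predicate; the HCR_TWI bit position is an assumption made for the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define HCR_TWI (1UL << 13)     /* assumed bit position, for illustration only */

    /* Trap WFI only when nothing can inject a vIRQ behind KVM's back. */
    static unsigned long update_twi(unsigned long hcr, int vlpi_count, bool nassgireq)
    {
        if (vlpi_count || nassgireq)
            hcr &= ~HCR_TWI;        /* HW delivery works; let the guest wait in place */
        else
            hcr |= HCR_TWI;         /* trap WFI so the host can run something else */
        return hcr;
    }

    int main(void)
    {
        printf("%#lx\n", update_twi(0, 0, false));      /* trap set */
        printf("%#lx\n", update_twi(HCR_TWI, 0, true)); /* trap cleared */
        return 0;
    }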
@@ -44,6 +44,7 @@
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -96,6 +96,7 @@ struct its_node {
 	struct mutex		dev_alloc_lock;
 	struct list_head	entry;
 	void __iomem		*base;
+	void __iomem		*sgir_base;
 	phys_addr_t		phys_base;
 	struct its_cmd_block	*cmd_base;
 	struct its_cmd_block	*cmd_write;
@@ -188,6 +189,15 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
+ */
+static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
+{
+	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;
@@ -197,7 +207,7 @@ static u16 get_its_list(struct its_vm *vm)
 		if (!is_v4(its))
 			continue;
 
-		if (vm->vlpi_count[its->list_nr])
+		if (require_its_list_vmovp(vm, its))
 			__set_bit(its->list_nr, &its_list);
 	}
 
@@ -239,15 +249,41 @@ static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
 	return NULL;
 }
 
-static int irq_to_cpuid(struct irq_data *d)
+static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
+{
+	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
+	return vpe->col_idx;
+}
+
+static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
+}
+
+static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
+{
+	struct its_vlpi_map *map = get_vlpi_map(d);
+	int cpu;
+
+	if (map) {
+		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	} else {
+		/* Physical LPIs are already locked via the irq_desc lock */
+		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+		cpu = its_dev->event_map.col_map[its_get_event_id(d)];
+		/* Keep GCC quiet... */
+		*flags = 0;
+	}
+
+	return cpu;
+}
+
+static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_vlpi_map *map = get_vlpi_map(d);
 
 	if (map)
-		return map->vpe->col_idx;
-
-	return its_dev->event_map.col_map[its_get_event_id(d)];
+		vpe_to_cpuid_unlock(map->vpe, flags);
 }
 
 static struct its_collection *valid_col(struct its_collection *col)
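
A brief aside (illustrative, not from the patch): the new vpe_to_cpuid_lock()/vpe_to_cpuid_unlock() helpers pin vpe->col_idx for the whole duration of an operation, so a concurrent affinity change cannot retarget the redistributor halfway through. The same pattern in standalone form, with a pthread mutex standing in for the raw spinlock:

    #include <pthread.h>
    #include <stdio.h>

    struct vpe {
        pthread_mutex_t lock;   /* plays the role of vpe_lock */
        int col_idx;            /* CPU whose redistributor hosts the vPE */
    };

    /* Return the CPU with the lock held; col_idx is stable until unlock. */
    static int vpe_cpuid_lock(struct vpe *v)
    {
        pthread_mutex_lock(&v->lock);
        return v->col_idx;
    }

    static void vpe_cpuid_unlock(struct vpe *v)
    {
        pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
        struct vpe v = { PTHREAD_MUTEX_INITIALIZER, 3 };
        int cpu = vpe_cpuid_lock(&v);
        printf("operating on the redistributor of CPU %d\n", cpu);
        vpe_cpuid_unlock(&v);
        return 0;
    }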
@@ -353,6 +389,15 @@ struct its_cmd_desc {
 		struct {
 			struct its_vpe *vpe;
 		} its_invdb_cmd;
+
+		struct {
+			struct its_vpe *vpe;
+			u8 sgi;
+			u8 priority;
+			bool enable;
+			bool group;
+			bool clear;
+		} its_vsgi_cmd;
 	};
 };
 
@@ -501,6 +546,31 @@ static void its_encode_db(struct its_cmd_block *cmd, bool db)
 	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
 }
 
+static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
+{
+	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
+}
+
+static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
+{
+	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
+}
+
+static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
+{
+	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
+}
+
+static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
+{
+	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
+}
+
+static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
+{
+	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
+}
+
 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
 {
 	/* Let's fixup BE commands */
@@ -866,6 +936,26 @@ static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
 	return valid_vpe(its, desc->its_invdb_cmd.vpe);
 }
 
+static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
+					  struct its_cmd_block *cmd,
+					  struct its_cmd_desc *desc)
+{
+	if (WARN_ON(!is_v4_1(its)))
+		return NULL;
+
+	its_encode_cmd(cmd, GITS_CMD_VSGI);
+	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
+	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
+	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
+	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
+	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
+	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
+
+	its_fixup_cmd(cmd);
+
+	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
+}
+
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
 				 struct its_cmd_block *ptr)
 {
@@ -1214,7 +1304,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
 		if (!is_v4(its))
 			continue;
 
-		if (!vpe->its_vm->vlpi_count[its->list_nr])
+		if (!require_its_list_vmovp(vpe->its_vm, its))
 			continue;
 
 		desc.its_vmovp_cmd.col = &its->collections[col_id];
@@ -1329,7 +1419,9 @@ static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
 	void __iomem *rdbase;
+	unsigned long flags;
 	u64 val;
+	int cpu;
 
 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1344,10 +1436,14 @@ static void direct_lpi_inv(struct irq_data *d)
 	}
 
 	/* Target the redistributor this LPI is currently routed to */
-	rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
+	cpu = irq_to_cpuid_lock(d, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVLPIR);
 
 	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	irq_to_cpuid_unlock(d, flags);
 }
 
 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
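
A brief aside (illustrative, not from the patch): direct_lpi_inv() now takes the vPE lock (via irq_to_cpuid_lock()) and only then the per-redistributor rd_lock, and the other redistributor paths use the same order. A standalone sketch of the fixed-order pattern that keeps the scheme deadlock-free:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vpe_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t rd_lock  = PTHREAD_MUTEX_INITIALIZER;

    /* Always vpe_lock first, rd_lock second, like the patched paths. */
    static void invalidate_lpi(int hwirq)
    {
        pthread_mutex_lock(&vpe_lock);  /* pins the target CPU */
        pthread_mutex_lock(&rd_lock);   /* serialises the MMIO sequence */
        printf("write %d to INVLPIR, then wait for sync\n", hwirq);
        pthread_mutex_unlock(&rd_lock);
        pthread_mutex_unlock(&vpe_lock);
    }

    int main(void)
    {
        invalidate_lpi(8192);
        return 0;
    }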
@@ -1499,12 +1595,31 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
 	return 0;
 }
 
+/*
+ * Two favourable cases:
+ *
+ * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
+ *     for vSGI delivery
+ *
+ * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
+ *     and we're better off mapping all VPEs always
+ *
+ * If neither (a) nor (b) is true, then we map vPEs on demand.
+ *
+ */
+static bool gic_requires_eager_mapping(void)
+{
+	if (!its_list_map || gic_rdists->has_rvpeid)
+		return true;
+
+	return false;
+}
+
 static void its_map_vm(struct its_node *its, struct its_vm *vm)
 {
 	unsigned long flags;
 
-	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;
 
 	raw_spin_lock_irqsave(&vmovp_lock, flags);
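
A brief aside (illustrative, not from the patch): gic_requires_eager_mapping() folds two situations into one predicate, as its comment explains. The decision table, as a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Map vPEs eagerly when VMOVP is cheap (no ITS list map) or when
     * vSGIs force every vPE to stay mapped (GICv4.1, has_rvpeid). */
    static bool requires_eager_mapping(bool its_list_map, bool has_rvpeid)
    {
        return !its_list_map || has_rvpeid;
    }

    int main(void)
    {
        for (int lm = 0; lm <= 1; lm++)
            for (int rv = 0; rv <= 1; rv++)
                printf("list_map=%d rvpeid=%d -> eager=%d\n",
                       lm, rv, requires_eager_mapping(lm, rv));
        return 0;
    }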
@@ -1538,7 +1653,7 @@ static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
 	unsigned long flags;
 
 	/* Not using the ITS list? Everything is always mapped. */
-	if (!its_list_map)
+	if (gic_requires_eager_mapping())
 		return;
 
 	raw_spin_lock_irqsave(&vmovp_lock, flags);
@@ -2452,6 +2567,10 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
 	if (!gic_rdists->has_rvpeid)
 		return true;
 
+	/* Skip non-present CPUs */
+	if (!base)
+		return true;
+
 	val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
 
 	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
@@ -3482,17 +3601,25 @@ static int its_vpe_set_affinity(struct irq_data *d,
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	int from, cpu = cpumask_first(mask_val);
+	unsigned long flags;
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
 	 * into the proxy device, we need to move the doorbell
 	 * interrupt to its new location.
+	 *
+	 * Another thing is that changing the affinity of a vPE affects
+	 * *other interrupts* such as all the vLPIs that are routed to
+	 * this vPE. This means that the irq_desc lock is not enough to
+	 * protect us, and that we must ensure nobody samples vpe->col_idx
+	 * during the update, hence the lock below which must also be
+	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
-	if (vpe->col_idx == cpu)
+	from = vpe_to_cpuid_lock(vpe, &flags);
+	if (from == cpu)
 		goto out;
 
-	from = vpe->col_idx;
 	vpe->col_idx = cpu;
 
 	/*
@@ -3508,6 +3635,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 
 out:
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	vpe_to_cpuid_unlock(vpe, flags);
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -3619,9 +3747,11 @@ static void its_vpe_send_inv(struct irq_data *d)
 		void __iomem *rdbase;
 
 		/* Target the redistributor this VPE is currently known on */
+		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
 		wait_for_syncr(rdbase);
+		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	} else {
 		its_vpe_send_cmd(vpe, its_send_inv);
 	}
@@ -3782,8 +3912,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
 	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
 
 	/* Target the redistributor this vPE is currently known on */
+	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVALLR);
+
+	wait_for_syncr(rdbase);
+	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 }
 
 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
@@ -3818,6 +3952,221 @@ static struct irq_chip its_vpe_4_1_irq_chip = {
 	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
 };
 
+static void its_configure_sgi(struct irq_data *d, bool clear)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_desc desc;
+
+	desc.its_vsgi_cmd.vpe = vpe;
+	desc.its_vsgi_cmd.sgi = d->hwirq;
+	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
+	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
+	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
+	desc.its_vsgi_cmd.clear = clear;
+
+	/*
+	 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
+	 * destination VPE is mapped there. Since we map them eagerly at
+	 * activation time, we're pretty sure the first GICv4.1 ITS will do.
+	 */
+	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
+}
+
+static void its_sgi_mask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+}
+
+static void its_sgi_unmask_irq(struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	vpe->sgi_config[d->hwirq].enabled = true;
+	its_configure_sgi(d, false);
+}
+
+static int its_sgi_set_affinity(struct irq_data *d,
+				const struct cpumask *mask_val,
+				bool force)
+{
+	/*
+	 * There is no notion of affinity for virtual SGIs, at least
+	 * not on the host (since they can only be targetting a vPE).
+	 * Tell the kernel we've done whatever it asked for.
+	 */
+	return IRQ_SET_MASK_OK;
+}
+
+static int its_sgi_set_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool state)
+{
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	if (state) {
+		struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+		struct its_node *its = find_4_1_its();
+		u64 val;
+
+		val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
+		val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
+		writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
+	} else {
+		its_configure_sgi(d, true);
+	}
+
+	return 0;
+}
+
+static int its_sgi_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which, bool *val)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	void __iomem *base;
+	unsigned long flags;
+	u32 count = 1000000;	/* 1s! */
+	u32 status;
+	int cpu;
+
+	if (which != IRQCHIP_STATE_PENDING)
+		return -EINVAL;
+
+	/*
+	 * Locking galore! We can race against two different events:
+	 *
+	 * - Concurent vPE affinity change: we must make sure it cannot
+	 *   happen, or we'll talk to the wrong redistributor. This is
+	 *   identical to what happens with vLPIs.
+	 *
+	 * - Concurrent VSGIPENDR access: As it involves accessing two
+	 *   MMIO registers, this must be made atomic one way or another.
+	 */
+	cpu = vpe_to_cpuid_lock(vpe, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
+	writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
+	do {
+		status = readl_relaxed(base + GICR_VSGIPENDR);
+		if (!(status & GICR_VSGIPENDR_BUSY))
+			goto out;
+
+		count--;
+		if (!count) {
+			pr_err_ratelimited("Unable to get SGI status\n");
+			goto out;
+		}
+		cpu_relax();
+		udelay(1);
+	} while (count);
+
+out:
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	vpe_to_cpuid_unlock(vpe, flags);
+
+	if (!count)
+		return -ENXIO;
+
+	*val = !!(status & (1 << d->hwirq));
+
+	return 0;
+}
+
+static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	switch (info->cmd_type) {
+	case PROP_UPDATE_VSGI:
+		vpe->sgi_config[d->hwirq].priority = info->priority;
+		vpe->sgi_config[d->hwirq].group = info->group;
+		its_configure_sgi(d, false);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_sgi_irq_chip = {
+	.name			= "GICv4.1-sgi",
+	.irq_mask		= its_sgi_mask_irq,
+	.irq_unmask		= its_sgi_unmask_irq,
+	.irq_set_affinity	= its_sgi_set_affinity,
+	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
+	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
+	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
+};
+
+static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs,
+				    void *args)
+{
+	struct its_vpe *vpe = args;
+	int i;
+
+	/* Yes, we do want 16 SGIs */
+	WARN_ON(nr_irqs != 16);
+
+	for (i = 0; i < 16; i++) {
+		vpe->sgi_config[i].priority = 0;
+		vpe->sgi_config[i].enabled = false;
+		vpe->sgi_config[i].group = false;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+					      &its_sgi_irq_chip, vpe);
+		irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
+	}
+
+	return 0;
+}
+
+static void its_sgi_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs)
+{
+	/* Nothing to do */
+}
+
+static int its_sgi_irq_domain_activate(struct irq_domain *domain,
+				       struct irq_data *d, bool reserve)
+{
+	/* Write out the initial SGI configuration */
+	its_configure_sgi(d, false);
+	return 0;
+}
+
+static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
+					  struct irq_data *d)
+{
+	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+	/*
+	 * The VSGI command is awkward:
+	 *
+	 * - To change the configuration, CLEAR must be set to false,
+	 *   leaving the pending bit unchanged.
+	 * - To clear the pending bit, CLEAR must be set to true, leaving
+	 *   the configuration unchanged.
+	 *
+	 * You just can't do both at once, hence the two commands below.
+	 */
+	vpe->sgi_config[d->hwirq].enabled = false;
+	its_configure_sgi(d, false);
+	its_configure_sgi(d, true);
+}
+
+static const struct irq_domain_ops its_sgi_domain_ops = {
+	.alloc		= its_sgi_irq_domain_alloc,
+	.free		= its_sgi_irq_domain_free,
+	.activate	= its_sgi_irq_domain_activate,
+	.deactivate	= its_sgi_irq_domain_deactivate,
+};
+
 static int its_vpe_id_alloc(void)
 {
 	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
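
A brief aside (illustrative, not from the patch): its_sgi_get_irqchip_state() above polls GICR_VSGIPENDR until the BUSY bit drops, giving up after roughly one second instead of hanging. The bounded-poll pattern in standalone form:

    #include <stdint.h>
    #include <stdio.h>

    #define BUSY (1u << 31)

    /* Stand-in for the MMIO status read; clears BUSY after a few polls. */
    static unsigned fake_polls = 3;
    static uint32_t read_status(void)
    {
        return fake_polls-- ? BUSY : 0x8;   /* pending bits once BUSY drops */
    }

    /* Bounded busy-wait: report failure rather than spin forever. */
    static int poll_pending(uint32_t *status, unsigned long budget)
    {
        while (budget--) {
            *status = read_status();
            if (!(*status & BUSY))
                return 0;
        }
        return -1;  /* the kernel path returns -ENXIO here */
    }

    int main(void)
    {
        uint32_t s;
        if (poll_pending(&s, 1000000))
            printf("timed out\n");
        else
            printf("pending = 0x%x\n", s);
        return 0;
    }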
@@ -3851,6 +4200,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
 	if (gic_rdists->has_rvpeid)
@@ -3960,8 +4310,12 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;
 
-	/* If we use the list map, we issue VMAPP on demand... */
-	if (its_list_map)
+	/*
+	 * If we use the list map, we issue VMAPP on demand... Unless
+	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+	 * so that VSGIs can work.
+	 */
+	if (!gic_requires_eager_mapping())
 		return 0;
 
 	/* Map the VPE to the first possible CPU */
@@ -3987,10 +4341,10 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_node *its;
 
 	/*
-	 * If we use the list map, we unmap the VPE once no VLPIs are
-	 * associated with the VM.
+	 * If we use the list map on GICv4.0, we unmap the VPE once no
+	 * VLPIs are associated with the VM.
 	 */
-	if (its_list_map)
+	if (!gic_requires_eager_mapping())
 		return;
 
 	list_for_each_entry(its, &its_nodes, entry) {
@@ -4404,7 +4758,7 @@ static int __init its_probe_one(struct resource *res,
 	struct page *page;
 	int err;
 
-	its_base = ioremap(res->start, resource_size(res));
+	its_base = ioremap(res->start, SZ_64K);
 	if (!its_base) {
 		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
 		return -ENOMEM;
@@ -4455,6 +4809,13 @@ static int __init its_probe_one(struct resource *res,
 
 	if (is_v4_1(its)) {
 		u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+
+		its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+		if (!its->sgir_base) {
+			err = -ENOMEM;
+			goto out_free_its;
+		}
+
 		its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
 
 		pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
@@ -4468,7 +4829,7 @@ static int __init its_probe_one(struct resource *res,
 			get_order(ITS_CMD_QUEUE_SZ));
 	if (!page) {
 		err = -ENOMEM;
-		goto out_free_its;
+		goto out_unmap_sgir;
 	}
 	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;
@@ -4535,6 +4896,9 @@ out_free_tables:
 	its_free_tables(its);
 out_free_cmd:
 	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+out_unmap_sgir:
+	if (its->sgir_base)
+		iounmap(its->sgir_base);
 out_free_its:
 	kfree(its);
 out_unmap:
@@ -4818,6 +5182,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	struct device_node *of_node;
 	struct its_node *its;
 	bool has_v4 = false;
+	bool has_v4_1 = false;
 	int err;
 
 	gic_rdists = rdists;
@@ -4838,12 +5203,25 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	if (err)
 		return err;
 
-	list_for_each_entry(its, &its_nodes, entry)
+	list_for_each_entry(its, &its_nodes, entry) {
 		has_v4 |= is_v4(its);
+		has_v4_1 |= is_v4_1(its);
+	}
+
+	/* Don't bother with inconsistent systems */
+	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+		rdists->has_rvpeid = false;
 
 	if (has_v4 & rdists->has_vlpis) {
+		const struct irq_domain_ops *sgi_ops;
+
+		if (has_v4_1)
+			sgi_ops = &its_sgi_domain_ops;
+		else
+			sgi_ops = NULL;
+
 		if (its_init_vpe_domain() ||
-		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
 			rdists->has_vlpis = false;
 			pr_err("ITS: Disabling GICv4 support\n");
 		}
@@ -723,6 +723,7 @@ static void __init gic_dist_init(void)
 	unsigned int i;
 	u64 affinity;
 	void __iomem *base = gic_data.dist_base;
+	u32 val;
 
 	/* Disable the distributor */
 	writel_relaxed(0, base + GICD_CTLR);
@@ -755,9 +756,14 @@ static void __init gic_dist_init(void)
 	/* Now do the common stuff, and wait for the distributor to drain */
 	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
 
+	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+		pr_info("Enabling SGIs without active state\n");
+		val |= GICD_CTLR_nASSGIreq;
+	}
+
 	/* Enable distributor with ARE, Group1 */
-	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
-		       base + GICD_CTLR);
+	writel_relaxed(val, base + GICD_CTLR);
 
 	/*
 	 * Set all global interrupts to the boot CPU only. ARE must be
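
A brief aside (illustrative, not from the patch): the hunk above builds the GICD_CTLR value once, ORing in nASSGIreq only when GICD_TYPER2 advertises the capability. The same construction as a standalone sketch (register layouts reduced to the bits that matter here):

    #include <stdint.h>
    #include <stdio.h>

    #define CTLR_ARE_NS      (1u << 4)
    #define CTLR_ENABLE_G1A  (1u << 1)
    #define CTLR_ENABLE_G1   (1u << 0)
    #define CTLR_nASSGIreq   (1u << 8)
    #define TYPER2_nASSGIcap (1u << 8)

    /* Mirror of the patched gic_dist_init() logic. */
    static uint32_t dist_ctlr(uint32_t typer2)
    {
        uint32_t val = CTLR_ARE_NS | CTLR_ENABLE_G1A | CTLR_ENABLE_G1;

        if (typer2 & TYPER2_nASSGIcap)
            val |= CTLR_nASSGIreq;  /* ask for SGIs without active state */
        return val;
    }

    int main(void)
    {
        printf("plain: 0x%x\n", dist_ctlr(0));
        printf("v4.1:  0x%x\n", dist_ctlr(TYPER2_nASSGIcap));
        return 0;
    }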
@@ -828,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 	typer = gic_read_typer(ptr + GICR_TYPER);
 	if ((typer >> 32) == aff) {
 		u64 offset = ptr - region->redist_base;
+		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
 		gic_data_rdist_rd_base() = ptr;
 		gic_data_rdist()->phys_base = region->phys_base + offset;
 
@@ -1757,6 +1764,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
 	gic_v3_kvm_info.vcpu = r;
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
 
@@ -2072,6 +2080,7 @@ static void __init gic_acpi_setup_kvm_info(void)
 	}
 
 	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
+	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
 	gic_set_kvm_info(&gic_v3_kvm_info);
 }
 
@@ -85,6 +85,53 @@
 
 static struct irq_domain *gic_domain;
 static const struct irq_domain_ops *vpe_domain_ops;
+static const struct irq_domain_ops *sgi_domain_ops;
+
+static bool has_v4_1(void)
+{
+	return !!sgi_domain_ops;
+}
+
+static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
+{
+	char *name;
+	int sgi_base;
+
+	if (!has_v4_1())
+		return 0;
+
+	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
+	if (!name)
+		goto err;
+
+	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
+	if (!vpe->fwnode)
+		goto err;
+
+	kfree(name);
+	name = NULL;
+
+	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
+						   sgi_domain_ops, vpe);
+	if (!vpe->sgi_domain)
+		goto err;
+
+	sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
+					   NUMA_NO_NODE, vpe,
+					   false, NULL);
+	if (sgi_base <= 0)
+		goto err;
+
+	return 0;
+
+err:
+	if (vpe->sgi_domain)
+		irq_domain_remove(vpe->sgi_domain);
+	if (vpe->fwnode)
+		irq_domain_free_fwnode(vpe->fwnode);
+	kfree(name);
+	return -ENOMEM;
+}
+
 int its_alloc_vcpu_irqs(struct its_vm *vm)
 {
@@ -112,8 +159,13 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
 	if (vpe_base_irq <= 0)
 		goto err;
 
-	for (i = 0; i < vm->nr_vpes; i++)
+	for (i = 0; i < vm->nr_vpes; i++) {
+		int ret;
 		vm->vpes[i]->irq = vpe_base_irq + i;
+		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
+		if (ret)
+			goto err;
+	}
 
 	return 0;
 
@@ -126,8 +178,28 @@ err:
 	return -ENOMEM;
 }
 
+static void its_free_sgi_irqs(struct its_vm *vm)
+{
+	int i;
+
+	if (!has_v4_1())
+		return;
+
+	for (i = 0; i < vm->nr_vpes; i++) {
+		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);
+
+		if (WARN_ON(!irq))
+			continue;
+
+		irq_domain_free_irqs(irq, 16);
+		irq_domain_remove(vm->vpes[i]->sgi_domain);
+		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
+	}
+}
+
 void its_free_vcpu_irqs(struct its_vm *vm)
 {
+	its_free_sgi_irqs(vm);
 	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
 	irq_domain_remove(vm->domain);
 	irq_domain_free_fwnode(vm->fwnode);
@@ -138,18 +210,50 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
 	return irq_set_vcpu_affinity(vpe->irq, info);
 }
 
-int its_schedule_vpe(struct its_vpe *vpe, bool on)
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
 {
-	struct its_cmd_info info;
+	struct irq_desc *desc = irq_to_desc(vpe->irq);
+	struct its_cmd_info info = { };
 	int ret;
 
 	WARN_ON(preemptible());
 
-	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+	info.cmd_type = DESCHEDULE_VPE;
+	if (has_v4_1()) {
+		/* GICv4.1 can directly deal with doorbells */
+		info.req_db = db;
+	} else {
+		/* Undo the nested disable_irq() calls... */
+		while (db && irqd_irq_disabled(&desc->irq_data))
+			enable_irq(vpe->irq);
+	}
 
 	ret = its_send_vpe_cmd(vpe, &info);
 	if (!ret)
-		vpe->resident = on;
+		vpe->resident = false;
+
+	return ret;
+}
+
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
+{
+	struct its_cmd_info info = { };
+	int ret;
+
+	WARN_ON(preemptible());
+
+	info.cmd_type = SCHEDULE_VPE;
+	if (has_v4_1()) {
+		info.g0en = g0en;
+		info.g1en = g1en;
+	} else {
+		/* Disabled the doorbell, as we're about to enter the guest */
+		disable_irq_nosync(vpe->irq);
+	}
+
+	ret = its_send_vpe_cmd(vpe, &info);
+	if (!ret)
+		vpe->resident = true;
 
 	return ret;
 }
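
A brief aside (illustrative, not from the patch): on GICv4.0 the doorbell interrupt is disabled each time the vPE becomes resident, and disable_irq() calls nest, so the non-resident path loops until the nesting count drains. A standalone sketch of that unwinding:

    #include <stdbool.h>
    #include <stdio.h>

    static int depth;   /* disable_irq() nesting depth for the doorbell */

    static void disable_doorbell(void) { depth++; }
    static void enable_doorbell(void)  { if (depth) depth--; }
    static bool doorbell_disabled(void) { return depth > 0; }

    /* Mirror of the v4.0 branch: undo however many nested disables piled up. */
    static void make_non_resident(bool want_doorbell)
    {
        while (want_doorbell && doorbell_disabled())
            enable_doorbell();
    }

    int main(void)
    {
        disable_doorbell();
        disable_doorbell();     /* two guest entries without a doorbell request */
        make_non_resident(true);
        printf("doorbell disabled: %d\n", doorbell_disabled()); /* 0 */
        return 0;
    }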
@@ -216,12 +320,28 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 	return irq_set_vcpu_affinity(irq, &info);
 }
 
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+int its_prop_update_vsgi(int irq, u8 priority, bool group)
+{
+	struct its_cmd_info info = {
+		.cmd_type = PROP_UPDATE_VSGI,
+		{
+			.priority	= priority,
+			.group		= group,
+		},
+	};
+
+	return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops)
 {
 	if (domain) {
 		pr_info("ITS: Enabling GICv4 support\n");
 		gic_domain = domain;
-		vpe_domain_ops = ops;
+		vpe_domain_ops = vpe_ops;
+		sgi_domain_ops = sgi_ops;
 		return 0;
 	}
 
@@ -70,6 +70,7 @@ struct vgic_global {
 
 	/* Hardware has GICv4? */
 	bool			has_gicv4;
+	bool			has_gicv4_1;
 
 	/* GIC system register CPU interface */
 	struct static_key_false gicv3_cpuif;
@@ -230,6 +231,9 @@ struct vgic_dist {
 	/* distributor enabled */
 	bool			enabled;
 
+	/* Wants SGIs without active state */
+	bool			nassgireq;
+
 	struct vgic_irq		*spis;
 
 	struct vgic_io_device	dist_iodev;
@@ -32,6 +32,8 @@ struct gic_kvm_info {
 	struct resource vctrl;
 	/* vlpi support */
 	bool		has_v4;
+	/* rvpeid support */
+	bool		has_v4_1;
 };
 
 const struct gic_kvm_info *gic_get_kvm_info(void);
@@ -57,6 +57,7 @@
 #define GICD_SPENDSGIR			0x0F20
 
 #define GICD_CTLR_RWP			(1U << 31)
+#define GICD_CTLR_nASSGIreq		(1U << 8)
 #define GICD_CTLR_DS			(1U << 6)
 #define GICD_CTLR_ARE_NS		(1U << 4)
 #define GICD_CTLR_ENABLE_G1A		(1U << 1)
@@ -90,6 +91,7 @@
 #define GICD_TYPER_ESPIS(typer)	\
 	(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
 
+#define GICD_TYPER2_nASSGIcap		(1U << 8)
 #define GICD_TYPER2_VIL			(1U << 7)
 #define GICD_TYPER2_VID			GENMASK(4, 0)
 
@@ -343,6 +345,15 @@
 #define GICR_VPENDBASER_4_1_VGRP1EN	(1ULL << 58)
 #define GICR_VPENDBASER_4_1_VPEID	GENMASK_ULL(15, 0)
 
+#define GICR_VSGIR			0x0080
+
+#define GICR_VSGIR_VPEID		GENMASK(15, 0)
+
+#define GICR_VSGIPENDR			0x0088
+
+#define GICR_VSGIPENDR_BUSY		(1U << 31)
+#define GICR_VSGIPENDR_PENDING		GENMASK(15, 0)
+
 /*
  * ITS registers, offsets from ITS_base
  */
@@ -366,6 +377,11 @@
 
 #define GITS_TRANSLATER			0x10040
 
+#define GITS_SGIR			0x20020
+
+#define GITS_SGIR_VPEID			GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID		GENMASK_ULL(3, 0)
+
 #define GITS_CTLR_ENABLE		(1U << 0)
 #define GITS_CTLR_ImDe			(1U << 1)
 #define GITS_CTLR_ITS_NUMBER_SHIFT	4
@@ -500,8 +516,9 @@
 #define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
 #define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
 #define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */
 #define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI			GITS_CMD_GICv4(3)
 #define GITS_CMD_INVDB			GITS_CMD_GICv4(0xe)
 
 /*
@@ -650,6 +667,7 @@
 
 struct rdists {
 	struct {
+		raw_spinlock_t	rd_lock;
 		void __iomem	*rd_base;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
@@ -49,10 +49,22 @@ struct its_vpe {
 	};
 	/* GICv4.1 implementations */
 	struct {
+		struct fwnode_handle	*fwnode;
+		struct irq_domain	*sgi_domain;
+		struct {
+			u8	priority;
+			bool	enabled;
+			bool	group;
+		}			sgi_config[16];
 		atomic_t vmapp_count;
 	};
 	};
 
+	/*
+	 * Ensures mutual exclusion between affinity setting of the
+	 * vPE and vLPI operations using vpe->col_idx.
+	 */
+	raw_spinlock_t		vpe_lock;
 	/*
 	 * This collection ID is used to indirect the target
 	 * redistributor for this VPE. The ID itself isn't involved in
@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type {
 	SCHEDULE_VPE,
 	DESCHEDULE_VPE,
 	INVALL_VPE,
+	PROP_UPDATE_VSGI,
 };
 
 struct its_cmd_info {
@@ -105,19 +118,27 @@ struct its_cmd_info {
 			bool	g0en;
 			bool	g1en;
 		};
+		struct {
+			u8	priority;
+			bool	group;
+		};
 	};
 };
 
 int its_alloc_vcpu_irqs(struct its_vm *vm);
 void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
 int its_invall_vpe(struct its_vpe *vpe);
 int its_map_vlpi(int irq, struct its_vlpi_map *map);
 int its_get_vlpi(int irq, struct its_vlpi_map *map);
 int its_unmap_vlpi(int irq);
 int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
 
 struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+		const struct irq_domain_ops *vpe_ops,
+		const struct irq_domain_ops *sgi_ops);
 
 #endif
@@ -625,6 +625,14 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 
 		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 			kvm_update_stolen_time(vcpu);
+
+		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+			/* The distributor enable bits were changed */
+			preempt_disable();
+			vgic_v4_put(vcpu, false);
+			vgic_v4_load(vcpu);
+			preempt_enable();
+		}
 	}
 }
 
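
A brief aside (illustrative, not from the patch): KVM_REQ_RELOAD_GICv4 follows the usual request-flag protocol: one CPU sets a bit for every vCPU, and each vCPU tests and clears it on its next entry, here reloading its GICv4 state. The test-and-clear step in standalone form; the bit value is an assumption for the example:

    #include <stdatomic.h>
    #include <stdio.h>

    #define REQ_RELOAD_GICV4 (1u << 4)  /* assumed bit, for illustration */

    static atomic_uint requests;

    /* kvm_check_request()-style: atomically test and clear one request bit. */
    static int check_request(unsigned req)
    {
        unsigned old = atomic_fetch_and(&requests, ~req);
        return !!(old & req);
    }

    int main(void)
    {
        atomic_fetch_or(&requests, REQ_RELOAD_GICV4);   /* make-request side */
        if (check_request(REQ_RELOAD_GICV4))
            printf("vPE put + load to pick up the new distributor bits\n");
        printf("second check: %d\n", check_request(REQ_RELOAD_GICV4)); /* 0 */
        return 0;
    }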
@@ -178,6 +178,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 			    struct kvm_vcpu *vcpu)
 {
 	char *type;
+	bool pending;
+
 	if (irq->intid < VGIC_NR_SGIS)
 		type = "SGI";
 	else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
@@ -190,6 +192,16 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 	if (irq->intid ==0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
 		print_header(s, irq, vcpu);
 
+	pending = irq->pending_latch;
+	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+		int err;
+
+		err = irq_get_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    &pending);
+		WARN_ON_ONCE(err);
+	}
+
 	seq_printf(s, " %s %4d "
 		      " %2d "
 		      "%d%d%d%d%d%d%d "
@@ -201,7 +213,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 		      "\n",
 			type, irq->intid,
 			(irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1,
-			irq->pending_latch,
+			pending,
 			irq->line_level,
 			irq->active,
 			irq->enabled,
@@ -3,9 +3,11 @@
  * VGICv3 MMIO handling functions
  */
 
+#include <linux/bitfield.h>
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/interrupt.h>
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
 
@@ -69,6 +71,8 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 		if (vgic->enabled)
 			value |= GICD_CTLR_ENABLE_SS_G1;
 		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+		if (vgic->nassgireq)
+			value |= GICD_CTLR_nASSGIreq;
 		break;
 	case GICD_TYPER:
 		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
@@ -80,6 +84,10 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
 		}
 		break;
+	case GICD_TYPER2:
+		if (kvm_vgic_global_state.has_gicv4_1)
+			value = GICD_TYPER2_nASSGIcap;
+		break;
 	case GICD_IIDR:
 		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
 			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
@@ -97,17 +105,46 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 			       unsigned long val)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	bool was_enabled = dist->enabled;
 
 	switch (addr & 0x0c) {
-	case GICD_CTLR:
+	case GICD_CTLR: {
+		bool was_enabled, is_hwsgi;
+
+		mutex_lock(&vcpu->kvm->lock);
+
+		was_enabled = dist->enabled;
+		is_hwsgi = dist->nassgireq;
+
 		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
 
-		if (!was_enabled && dist->enabled)
+		/* Not a GICv4.1? No HW SGIs */
+		if (!kvm_vgic_global_state.has_gicv4_1)
+			val &= ~GICD_CTLR_nASSGIreq;
+
+		/* Dist stays enabled? nASSGIreq is RO */
+		if (was_enabled && dist->enabled) {
+			val &= ~GICD_CTLR_nASSGIreq;
+			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
+		}
+
+		/* Switching HW SGIs? */
+		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+		if (is_hwsgi != dist->nassgireq)
+			vgic_v4_configure_vsgis(vcpu->kvm);
+
+		if (kvm_vgic_global_state.has_gicv4_1 &&
+		    was_enabled != dist->enabled)
+			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
+		else if (!was_enabled && dist->enabled)
 			vgic_kick_vcpus(vcpu->kvm);
+
+		mutex_unlock(&vcpu->kvm->lock);
 		break;
+	}
 	case GICD_TYPER:
+	case GICD_TYPER2:
 	case GICD_IIDR:
+		/* This is at best for documentation purposes... */
 		return;
 	}
 }
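
A brief aside (illustrative, not from the patch): the GICD_CTLR emulation above lets the guest flip nASSGIreq only while the distributor is being enabled or disabled; if it stays enabled, the bit is forced back to its current value. That rule as a pure function with a tiny check:

    #include <stdbool.h>
    #include <stdio.h>

    #define ENABLE_G1 (1u << 0) /* standing in for GICD_CTLR_ENABLE_SS_G1 */
    #define nASSGIreq (1u << 8)

    /* Returns the effective nassgireq state after a guest write. */
    static bool write_ctlr(unsigned val, bool *enabled, bool was_hwsgi, bool has_v4_1)
    {
        bool was_enabled = *enabled;

        *enabled = val & ENABLE_G1;
        if (!has_v4_1)
            val &= ~nASSGIreq;  /* no GICv4.1: the bit reads and writes as zero */
        if (was_enabled && *enabled)    /* dist stays enabled: nASSGIreq is RO */
            val = (val & ~nASSGIreq) | (was_hwsgi ? nASSGIreq : 0);
        return val & nASSGIreq;
    }

    int main(void)
    {
        bool en = false;
        /* enabling and requesting HW SGIs in one write: honoured */
        printf("%d\n", write_ctlr(ENABLE_G1 | nASSGIreq, &en, false, true));
        /* clearing the bit while staying enabled: ignored */
        printf("%d\n", write_ctlr(ENABLE_G1, &en, true, true));
        return 0;
    }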
@@ -116,10 +153,22 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
 				   gpa_t addr, unsigned int len,
 				   unsigned long val)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	switch (addr & 0x0c) {
+	case GICD_TYPER2:
 	case GICD_IIDR:
 		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
 			return -EINVAL;
+		return 0;
+	case GICD_CTLR:
+		/* Not a GICv4.1? No HW SGIs */
+		if (!kvm_vgic_global_state.has_gicv4_1)
+			val &= ~GICD_CTLR_nASSGIreq;
+
+		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+		return 0;
 	}
 
 	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
@@ -257,8 +306,18 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
 	 */
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+		bool state = irq->pending_latch;
 
-		if (irq->pending_latch)
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			int err;
+
+			err = irq_get_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    &state);
+			WARN_ON(err);
+		}
+
+		if (state)
 			value |= (1U << i);
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -942,8 +1001,18 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 		 * generate interrupts of either group.
 		 */
 		if (!irq->group || allow_group1) {
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+			if (!irq->hw) {
+				irq->pending_latch = true;
+				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+			} else {
+				/* HW SGI? Ask the GIC to inject it */
+				int err;
+				err = irq_set_irqchip_state(irq->host_irq,
+							    IRQCHIP_STATE_PENDING,
+							    true);
+				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			}
 		} else {
 			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
@@ -5,6 +5,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bsearch.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <kvm/iodev.h>
@@ -59,6 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+static void vgic_update_vsgi(struct vgic_irq *irq)
+{
+	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
+}
+
 void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 			   unsigned int len, unsigned long val)
 {
@@ -71,7 +78,12 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->group = !!(val & BIT(i));
-		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			vgic_update_vsgi(irq);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		} else {
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		}
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -113,7 +125,21 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (vgic_irq_is_mapped_level(irq)) {
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			if (!irq->enabled) {
+				struct irq_data *data;
+
+				irq->enabled = true;
+				data = &irq_to_desc(irq->host_irq)->irq_data;
+				while (irqd_irq_disabled(data))
+					enable_irq(irq->host_irq);
+			}
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		} else if (vgic_irq_is_mapped_level(irq)) {
 			bool was_high = irq->line_level;
 
 			/*
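The enable path above loops on enable_irq() because interrupt disabling is depth-counted: each disable_irq_nosync() issued from the clear-enable path nests, and a single enable could leave the line dead. A standalone sketch of that unwind; vsgi_undo_nested_disable() is a hypothetical name:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* enable_irq() only decrements the disable depth; loop until the
     * line is actually live again, as the senable hunk does.
     */
    static void vsgi_undo_nested_disable(unsigned int host_irq)
    {
            struct irq_data *data = irq_get_irq_data(host_irq);

            while (irqd_irq_disabled(data))
                    enable_irq(host_irq);
    }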
@@ -148,6 +174,8 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
+			disable_irq_nosync(irq->host_irq);
 
 		irq->enabled = false;
@@ -167,10 +195,22 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		unsigned long flags;
+		bool val;
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (irq_is_pending(irq))
-			value |= (1U << i);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			int err;
+
+			val = false;
+			err = irq_get_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    &val);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+		} else {
+			val = irq_is_pending(irq);
+		}
+
+		value |= ((u32)val << i);
 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -215,6 +255,21 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 		}
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			/* HW SGI? Ask the GIC to inject it */
+			int err;
+			err = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    true);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		}
+
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
 		else
@@ -269,6 +324,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			/* HW SGI? Ask the GIC to clear its pending bit */
+			int err;
+			err = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    false);
+			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+			vgic_put_irq(vcpu->kvm, irq);
+
+			continue;
+		}
+
 		if (irq->hw)
 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
 		else
@@ -318,8 +387,15 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
-	if (irq->hw) {
+	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+		/*
+		 * GICv4.1 VSGI feature doesn't track an active state,
+		 * so let's not kid ourselves, there is nothing we can
+		 * do here.
+		 */
+		irq->active = false;
 	} else {
 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
 		u8 active_source;
@@ -493,6 +569,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+		if (irq->hw && vgic_irq_is_sgi(irq->intid))
+			vgic_update_vsgi(irq);
 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -540,6 +540,8 @@ int vgic_v3_map_resources(struct kvm *kvm)
 		goto out;
 	}
 
+	if (kvm_vgic_global_state.has_gicv4_1)
+		vgic_v4_configure_vsgis(kvm);
 	dist->ready = true;
 
 out:
@@ -595,7 +597,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	/* GICv4 support? */
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
-		kvm_info("GICv4 support %sabled\n",
+		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
+		kvm_info("GICv4%s support %sabled\n",
+			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
 			 gicv4_enable ? "en" : "dis");
 	}
@@ -67,10 +67,10 @@
  * it. And if we've migrated our vcpu from one CPU to another, we must
  * tell the ITS (so that the messages reach the right redistributor).
  * This is done in two steps: first issue a irq_set_affinity() on the
- * irq corresponding to the vcpu, then call its_schedule_vpe(). You
- * must be in a non-preemptible context. On exit, another call to
- * its_schedule_vpe() tells the redistributor that we're done with the
- * vcpu.
+ * irq corresponding to the vcpu, then call its_make_vpe_resident().
+ * You must be in a non-preemptible context. On exit, a call to
+ * its_make_vpe_non_resident() tells the redistributor that we're done
+ * with the vcpu.
  *
  * Finally, the doorbell handling: Each vcpu is allocated an interrupt
  * which will fire each time a VLPI is made pending whilst the vcpu is
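The updated comment pins down the residency protocol. As a sketch of the call sequence, assuming vpe->irq is the per-vPE doorbell interrupt shown elsewhere in this diff; make_vpe_resident_on() is an illustrative wrapper, not a function from the series:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/irqchip/arm-gic-v4.h>

    /* Step 1: point the vPE's doorbell at the target CPU's redistributor;
     * step 2: tell the redistributor the vPE is resident. The caller must
     * be in a non-preemptible context, per the comment above.
     */
    static int make_vpe_resident_on(struct its_vpe *vpe, int cpu, bool g1en)
    {
            int err;

            err = irq_set_affinity(vpe->irq, cpumask_of(cpu));
            if (err)
                    return err;

            return its_make_vpe_resident(vpe, false, g1en);
    }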
@@ -86,7 +86,8 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	struct kvm_vcpu *vcpu = info;
 
 	/* We got the message, no need to fire again */
-	if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
+	if (!kvm_vgic_global_state.has_gicv4_1 &&
+	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
@@ -96,6 +97,104 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	return IRQ_HANDLED;
 }
 
+static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
+{
+	vpe->sgi_config[irq->intid].enabled = irq->enabled;
+	vpe->sgi_config[irq->intid].group = irq->group;
+	vpe->sgi_config[irq->intid].priority = irq->priority;
+}
+
+static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
+{
+	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	int i;
+
+	/*
+	 * With GICv4.1, every virtual SGI can be directly injected. So
+	 * let's pretend that they are HW interrupts, tied to a host
+	 * IRQ. The SGI code will do its magic.
+	 */
+	for (i = 0; i < VGIC_NR_SGIS; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+		struct irq_desc *desc;
+		unsigned long flags;
+		int ret;
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (irq->hw)
+			goto unlock;
+
+		irq->hw = true;
+		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);
+
+		/* Transfer the full irq state to the vPE */
+		vgic_v4_sync_sgi_config(vpe, irq);
+		desc = irq_to_desc(irq->host_irq);
+		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
+					      false);
+		if (!WARN_ON(ret)) {
+			/* Transfer pending state */
+			ret = irq_set_irqchip_state(irq->host_irq,
+						    IRQCHIP_STATE_PENDING,
+						    irq->pending_latch);
+			WARN_ON(ret);
+			irq->pending_latch = false;
+		}
+	unlock:
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+}
+
+static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i < VGIC_NR_SGIS; i++) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
+		struct irq_desc *desc;
+		unsigned long flags;
+		int ret;
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+		if (!irq->hw)
+			goto unlock;
+
+		irq->hw = false;
+		ret = irq_get_irqchip_state(irq->host_irq,
+					    IRQCHIP_STATE_PENDING,
+					    &irq->pending_latch);
+		WARN_ON(ret);
+
+		desc = irq_to_desc(irq->host_irq);
+		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+	unlock:
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+}
+
+/* Must be called with the kvm lock held */
+void vgic_v4_configure_vsgis(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_arm_halt_guest(kvm);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (dist->nassgireq)
+			vgic_v4_enable_vsgis(vcpu);
+		else
+			vgic_v4_disable_vsgis(vcpu);
+	}
+
+	kvm_arm_resume_guest(kvm);
+}
+
 /**
  * vgic_v4_init - Initialize the GICv4 data structures
  * @kvm: Pointer to the VM being initialized
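The reconfiguration pass above is driven by the guest (or userspace) flipping GICD_CTLR.nASSGIreq. A sketch of the decision, using only fields visible in this diff; vsgi_mode_changed() is a hypothetical helper, and the real trap handler additionally deals with locking and with nASSGIreq being read-only while the distributor stays enabled:

    #include <kvm/arm_vgic.h>
    #include <linux/irqchip/arm-gic-v3.h>

    /* Returns true when a GICD_CTLR write flips the SGI delivery mode,
     * i.e. when the caller should run vgic_v4_configure_vsgis() with
     * the kvm lock held, as the comment on that function requires.
     */
    static bool vsgi_mode_changed(struct vgic_dist *dist, unsigned long val)
    {
            bool was_hwsgi = dist->nassgireq;

            /* Not a GICv4.1? No HW SGIs */
            if (!kvm_vgic_global_state.has_gicv4_1)
                    val &= ~GICD_CTLR_nASSGIreq;

            dist->nassgireq = val & GICD_CTLR_nASSGIreq;
            return was_hwsgi != dist->nassgireq;
    }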
@@ -140,6 +239,7 @@ int vgic_v4_init(struct kvm *kvm)
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		int irq = dist->its_vm.vpes[i]->irq;
+		unsigned long irq_flags = DB_IRQ_FLAGS;
 
 		/*
 		 * Don't automatically enable the doorbell, as we're
@@ -147,8 +247,14 @@ int vgic_v4_init(struct kvm *kvm)
 		 * blocked. Also disable the lazy disabling, as the
 		 * doorbell could kick us out of the guest too
 		 * early...
+		 *
+		 * On GICv4.1, the doorbell is managed in HW and must
+		 * be left enabled.
 		 */
-		irq_set_status_flags(irq, DB_IRQ_FLAGS);
+		if (kvm_vgic_global_state.has_gicv4_1)
+			irq_flags &= ~IRQ_NOAUTOEN;
+		irq_set_status_flags(irq, irq_flags);
+
 		ret = request_irq(irq, vgic_v4_doorbell_handler,
 				  0, "vcpu", vcpu);
 		if (ret) {
@@ -199,19 +305,11 @@ void vgic_v4_teardown(struct kvm *kvm)
 int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-	struct irq_desc *desc = irq_to_desc(vpe->irq);
 
 	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
 		return 0;
 
-	/*
-	 * If blocking, a doorbell is required. Undo the nested
-	 * disable_irq() calls...
-	 */
-	while (need_db && irqd_irq_disabled(&desc->irq_data))
-		enable_irq(vpe->irq);
-
-	return its_schedule_vpe(vpe, false);
+	return its_make_vpe_non_resident(vpe, need_db);
 }
 
 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -232,18 +330,19 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	if (err)
 		return err;
 
-	/* Disabled the doorbell, as we're about to enter the guest */
-	disable_irq_nosync(vpe->irq);
-
-	err = its_schedule_vpe(vpe, true);
+	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 	if (err)
 		return err;
 
 	/*
 	 * Now that the VPE is resident, let's get rid of a potential
-	 * doorbell interrupt that would still be pending.
+	 * doorbell interrupt that would still be pending. This is a
+	 * GICv4.0 only "feature"...
 	 */
-	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+	if (!kvm_vgic_global_state.has_gicv4_1)
+		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
+
+	return err;
 }
 
 static struct vgic_its *vgic_get_its(struct kvm *kvm,
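With both halves converted to the residency API, the per-vcpu lifecycle reduces to a pair of calls. A condensed sketch of the ordering; the enter/exit wrappers are illustrative, the real callers presumably being kvm's vcpu load/put paths:

    /* Make the vPE resident before entering the guest... */
    static void vcpu_gic_enter(struct kvm_vcpu *vcpu)
    {
            WARN_ON(vgic_v4_load(vcpu));
    }

    /* ...and non-resident on the way out. Ask for a doorbell only when
     * the vcpu is about to block and needs waking on a pending vLPI.
     */
    static void vcpu_gic_exit(struct kvm_vcpu *vcpu, bool blocking)
    {
            WARN_ON(vgic_v4_put(vcpu, blocking));
    }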
@@ -316,5 +316,6 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
 bool vgic_supports_direct_msis(struct kvm *kvm);
 int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
+void vgic_v4_configure_vsgis(struct kvm *kvm);
 
 #endif