Merge branch 'kvm-arm64/vgic-fixes-5.7' into kvmarm-master/master
commit 446c0768f5
virt/kvm/arm/vgic/vgic-init.c

@@ -348,6 +348,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+	/*
+	 * Retire all pending LPIs on this vcpu anyway as we're
+	 * going to destroy it.
+	 */
+	vgic_flush_pending_lpis(vcpu);
+
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
@@ -359,10 +365,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
 	vgic_debug_destroy(kvm);
 
-	kvm_vgic_dist_destroy(kvm);
-
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_vgic_vcpu_destroy(vcpu);
+
+	kvm_vgic_dist_destroy(kvm);
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
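Taken together, the two vgic-init.c hunks are about teardown ordering: kvm_vgic_vcpu_destroy() now flushes pending LPIs, and that flush still dereferences distributor state, so kvm_vgic_dist_destroy() has to run after the per-vcpu loop instead of before it. Below is a standalone sketch of that ordering constraint as I read the hunks; struct dist and struct vcpu are simplified stand-ins, not the KVM types.

#include <stdio.h>
#include <stdlib.h>

struct dist {				/* shared state, like the vgic distributor */
	int *lpi_table;
};

struct vcpu {
	struct dist *dist;
	int pending;
};

static void vcpu_destroy(struct vcpu *v)
{
	/* Flushing a pending LPI still reads the shared table... */
	if (v->pending)
		printf("flushing LPI %d\n", v->dist->lpi_table[0]);
	v->pending = 0;
}

static void dist_destroy(struct dist *d)
{
	free(d->lpi_table);
	d->lpi_table = NULL;
}

int main(void)
{
	struct dist d = { .lpi_table = calloc(1, sizeof(int)) };
	struct vcpu v = { .dist = &d, .pending = 1 };

	/* ...so the vcpus must be torn down first and the shared state
	 * last; destroying 'd' before 'v' would leave vcpu_destroy()
	 * reading freed memory. */
	vcpu_destroy(&v);
	dist_destroy(&d);
	return 0;
}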
virt/kvm/arm/vgic/vgic-its.c

@@ -96,14 +96,21 @@ out_unlock:
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
 	 * However we only have those structs for mapped IRQs, so we read in
 	 * the respective config data from memory here upon mapping the LPI.
+	 *
+	 * Should any of these fail, behave as if we couldn't create the LPI
+	 * by dropping the refcount and returning the error.
 	 */
 	ret = update_lpi_config(kvm, irq, NULL, false);
-	if (ret)
+	if (ret) {
+		vgic_put_irq(kvm, irq);
 		return ERR_PTR(ret);
+	}
 
 	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
-	if (ret)
+	if (ret) {
+		vgic_put_irq(kvm, irq);
 		return ERR_PTR(ret);
+	}
 
 	return irq;
 }
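The change above is the usual drop-the-reference-on-error idiom: vgic_add_lpi() holds a reference on the freshly created vgic_irq, so both early-return paths must put it back before reporting failure, exactly as the new comment says. A standalone userspace sketch of the idiom; obj_get()/obj_put()/setup() and this ERR_PTR() are simplified stand-ins for the kernel's refcounting and error-pointer machinery.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ERR_PTR(err)	((struct obj *)(long)(err))	/* toy error-pointer encoding */

struct obj {
	int refcount;
};

static struct obj *obj_get(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

static int setup(struct obj *o, int fail)	/* hypothetical config step */
{
	(void)o;
	return fail ? -EINVAL : 0;
}

static struct obj *create_obj(int fail_step)
{
	struct obj *o = obj_get();	/* we hold a reference from here on */
	int ret;

	ret = setup(o, fail_step == 1);
	if (ret) {
		obj_put(o);		/* drop it before reporting failure */
		return ERR_PTR(ret);
	}

	ret = setup(o, fail_step == 2);
	if (ret) {
		obj_put(o);
		return ERR_PTR(ret);
	}

	return o;			/* caller now owns the reference */
}

int main(void)
{
	struct obj *o = create_obj(0);	/* 0: no step fails */

	printf("created %p\n", (void *)o);
	obj_put(o);
	return 0;
}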
virt/kvm/arm/vgic/vgic-mmio-v2.c

@@ -409,24 +409,28 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
 		NULL, vgic_mmio_uaccess_write_v2_group, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
-		vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
+		vgic_mmio_read_pending, vgic_mmio_write_spending,
+		NULL, vgic_uaccess_write_spending, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
-		vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
+		vgic_mmio_read_pending, vgic_mmio_write_cpending,
+		NULL, vgic_uaccess_write_cpending, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
virt/kvm/arm/vgic/vgic-mmio-v3.c

@@ -538,10 +538,12 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
 		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
 		vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -553,11 +555,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
 		1, VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
@@ -609,11 +611,13 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
 		vgic_mmio_read_group, vgic_mmio_write_group, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
-		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_senable,
+		NULL, vgic_uaccess_write_senable, 4,
 		VGIC_ACCESS_32bit),
-	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
-		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
+		vgic_mmio_read_enable, vgic_mmio_write_cenable,
+		NULL, vgic_uaccess_write_cenable, 4,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
 		vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -625,12 +629,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive,
-		4, VGIC_ACCESS_32bit),
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive,
-		4, VGIC_ACCESS_32bit),
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
 		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
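For context on the register-table changes in vgic-mmio-v2.c and vgic-mmio-v3.c: each region carries a read/write pair for guest MMIO plus an optional pair for userspace (uaccess) traffic, and a NULL uaccess slot makes userspace fall through to the MMIO handler, which may also poke physical interrupt state. The new entries wire dedicated uaccess handlers into those slots. A standalone sketch of that dispatch pattern; struct region here is a simplified stand-in for the kernel's vgic_register_region, not its actual layout.

#include <stdio.h>

typedef unsigned long (*read_fn)(unsigned int offset);
typedef void (*write_fn)(unsigned int offset, unsigned long val);

struct region {
	unsigned int base;
	read_fn read;			/* guest MMIO accessors */
	write_fn write;
	read_fn uaccess_read;		/* userspace accessors; NULL = fall back */
	write_fn uaccess_write;
};

static unsigned long mmio_read(unsigned int offset)
{
	printf("mmio read @%#x\n", offset);
	return 0;
}

static void mmio_write(unsigned int offset, unsigned long val)
{
	printf("mmio write @%#x <- %#lx (may touch physical state)\n", offset, val);
}

static void uaccess_write(unsigned int offset, unsigned long val)
{
	printf("uaccess write @%#x <- %#lx (latch virtual state only)\n", offset, val);
}

static const struct region regions[] = {
	/* writes from userspace get their own handler, reads fall back */
	{ 0x100, mmio_read, mmio_write, NULL, uaccess_write },
};

static void dispatch_write(const struct region *r, unsigned long val,
			   int from_userspace)
{
	write_fn fn = (from_userspace && r->uaccess_write) ? r->uaccess_write
							   : r->write;

	fn(r->base, val);
}

int main(void)
{
	dispatch_write(&regions[0], 1, 0);	/* guest MMIO path */
	dispatch_write(&regions[0], 1, 1);	/* userspace path */
	return 0;
}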
virt/kvm/arm/vgic/vgic-mmio.c

@@ -184,6 +184,48 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 	}
 }
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		irq->enabled = true;
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		irq->enabled = false;
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 				     gpa_t addr, unsigned int len)
 {
@@ -219,17 +261,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	return value;
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-				 bool is_uaccess)
-{
-	if (is_uaccess)
-		return;
-
-	irq->pending_latch = true;
-	vgic_irq_set_phys_active(irq, true);
-}
-
 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 {
 	return (vgic_irq_is_sgi(irq->intid) &&
@@ -240,7 +271,6 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
 {
-	bool is_uaccess = !kvm_get_running_vcpu();
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 	unsigned long flags;
@@ -270,22 +300,48 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			continue;
 		}
 
+		irq->pending_latch = true;
 		if (irq->hw)
-			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
-		else
-			irq->pending_latch = true;
+			vgic_irq_set_phys_active(irq, true);
+
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-				 bool is_uaccess)
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val)
 {
-	if (is_uaccess)
-		return;
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		irq->pending_latch = true;
+
+		/*
+		 * GICv2 SGIs are terribly broken. We can't restore
+		 * the source of the interrupt, so just pick the vcpu
+		 * itself as the source...
+		 */
+		if (is_vgic_v2_sgi(vcpu, irq))
+			irq->source |= BIT(vcpu->vcpu_id);
+
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
 
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
 	irq->pending_latch = false;
 
 	/*
@@ -308,7 +364,6 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
 {
-	bool is_uaccess = !kvm_get_running_vcpu();
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 	unsigned long flags;
@@ -339,7 +394,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 		}
 
 		if (irq->hw)
-			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
+			vgic_hw_irq_cpending(vcpu, irq);
 		else
 			irq->pending_latch = false;
 
@@ -348,8 +403,68 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	}
 }
 
-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
-				    gpa_t addr, unsigned int len)
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+	unsigned long flags;
+
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		/*
+		 * More fun with GICv2 SGIs! If we're clearing one of them
+		 * from userspace, which source vcpu to clear? Let's not
+		 * even think of it, and blow the whole set.
+		 */
+		if (is_vgic_v2_sgi(vcpu, irq))
+			irq->source = 0;
+
+		irq->pending_latch = false;
+
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return 0;
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid >= VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid >= VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+					     gpa_t addr, unsigned int len)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 value = 0;
@@ -359,6 +474,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/*
+		 * Even for HW interrupts, don't evaluate the HW state as
+		 * all the guest is interested in is the virtual state.
+		 */
 		if (irq->active)
 			value |= (1U << i);
 
@@ -368,6 +487,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 val;
+
+	mutex_lock(&vcpu->kvm->lock);
+	vgic_access_active_prepare(vcpu, intid);
+
+	val = __vgic_mmio_read_active(vcpu, addr, len);
+
+	vgic_access_active_finish(vcpu, intid);
+	mutex_unlock(&vcpu->kvm->lock);
+
+	return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+				       gpa_t addr, unsigned int len)
+{
+	return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				      bool active, bool is_uaccess)
@@ -426,36 +568,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_resume_guest(vcpu->kvm);
-}
-
 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 				      gpa_t addr, unsigned int len,
 				      unsigned long val)
@@ -477,11 +589,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
 
@@ -514,11 +626,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
 
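The renamed vgic_access_active_prepare()/vgic_access_active_finish() pair extends the old write-side bracket to reads: stop every vcpu so no list register can rewrite the active state mid-access, perform the access, then resume. A standalone sketch of that bracket shape; halt_guest() and resume_guest() are stand-ins for kvm_arm_halt_guest() and kvm_arm_resume_guest().

#include <stdio.h>

static int vcpus_halted;

static void halt_guest(void)
{
	vcpus_halted = 1;
	printf("all vcpus halted\n");
}

static void resume_guest(void)
{
	vcpus_halted = 0;
	printf("all vcpus resumed\n");
}

static unsigned long __read_active(void)
{
	/* Safe: no vcpu runs here, so no list register can change
	 * the active state underneath us. */
	return vcpus_halted ? 0xA : 0;
}

static unsigned long read_active(void)
{
	unsigned long val;

	halt_guest();			/* prepare: stop the world */
	val = __read_active();
	resume_guest();			/* finish: let the guest run */
	return val;
}

int main(void)
{
	printf("active = %#lx\n", read_active());
	return 0;
}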
virt/kvm/arm/vgic/vgic-mmio.h

@@ -138,6 +138,14 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val);
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val);
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+			       gpa_t addr, unsigned int len,
+			       unsigned long val);
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 				     gpa_t addr, unsigned int len);
 
@@ -149,9 +157,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val);
 
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val);
+
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val);
+
 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 				    gpa_t addr, unsigned int len);
 
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+				       gpa_t addr, unsigned int len);
+
 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val);