KVM: arm/arm64: vgic: Replace spin_is_locked() with lockdep
lockdep_assert_held() is better suited to checking locking requirements,
since it only checks if the current thread holds the lock regardless of
whether someone else does. This is also a step towards possibly removing
spin_is_locked().

Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: <kvmarm@lists.cs.columbia.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
parent 35f3aa39f2
commit d4d592a6ee
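To make the semantic difference concrete, here is a minimal kernel-style sketch, not part of this commit; demo_lock and demo_must_hold_lock() are made-up names for illustration:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_SPINLOCK(demo_lock);

/* Called from paths that are required to hold demo_lock. */
static void demo_must_hold_lock(void)
{
	/*
	 * Old style of check: spin_is_locked() only reports that *some*
	 * CPU holds the lock, so this stays silent if another thread is
	 * the holder and the current caller forgot to lock.
	 */
	WARN_ON(!spin_is_locked(&demo_lock));

	/*
	 * New style of check: lockdep tracks held locks per thread, so
	 * this fires precisely when the current thread is not the
	 * holder, and it compiles away when CONFIG_LOCKDEP is disabled.
	 */
	lockdep_assert_held(&demo_lock);
}

Since lockdep_assert_held() expands to nothing without CONFIG_LOCKDEP, the converted callers below also no longer need a debug-config wrapper around the assertion.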
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
  */
 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	/* If the interrupt is active, it must stay on the current vcpu */
 	if (irq->active)
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 }
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 {
 	struct kvm_vcpu *vcpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 retry:
 	vcpu = vgic_target_oracle(irq);
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
 				    struct vgic_irq *irq, int lr)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	if (kvm_vgic_global_state.type == VGIC_V2)
 		vgic_v2_populate_lr(vcpu, irq, lr);
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 
 	*multi_sgi = false;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	bool multi_sgi;
 	u8 prio = 0xff;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	count = compute_ap_list_depth(vcpu, &multi_sgi);
 	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
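For reference, DEBUG_SPINLOCK_BUG_ON() is a vgic-local wrapper; a sketch of its likely definition in the vgic headers (approximate, not shown in this diff):

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)	/* only checked on debug builds */
#else
#define DEBUG_SPINLOCK_BUG_ON(p)		/* compiled out otherwise */
#endif

With all six call sites converted, these assertions key off lockdep's per-thread held-lock tracking rather than the raw lock word, so they catch the "lock held by a different thread" case the old check missed.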