KVM: arm/arm64: vgic: Do not use spin_lock_irqsave/restore with irq disabled
author Jia He <hejianet@gmail.com>
Fri, 3 Aug 2018 13:57:04 +0000 (21:57 +0800)
committer Marc Zyngier <marc.zyngier@arm.com>
Sun, 12 Aug 2018 11:15:18 +0000 (12:15 +0100)
kvm_vgic_sync_hwstate is only called with IRQs disabled. There is
thus no need to use spin_lock_irqsave/restore in vgic_fold_lr_state
and vgic_prune_ap_list.

This patch replaces them with the non-irq-safe versions.

Signed-off-by: Jia He <jia.he@hxt-semitech.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
[maz: commit message tidy-up]
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
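As context for the hunks below, here is a minimal sketch (the function
names are illustrative, not part of the patch) contrasting the two
idioms. spin_lock_irqsave() saves the current interrupt state and
disables IRQs before taking the lock; when the caller already runs
with IRQs off, as kvm_vgic_sync_hwstate guarantees for these paths,
plain spin_lock() is enough and skips the redundant flag save/restore:

/*
 * Illustrative sketch, not from the patch. DEBUG_SPINLOCK_BUG_ON() is
 * the vgic helper from virt/kvm/arm/vgic/vgic.h: a BUG_ON() under
 * CONFIG_DEBUG_SPINLOCK, a no-op otherwise.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>

static DEFINE_SPINLOCK(example_lock);

/* Callable from any context: must save and restore the IRQ state. */
static void update_any_context(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);
}

/* Caller guarantees IRQs are already disabled. */
static void update_irqs_off(void)
{
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());  /* assert the contract */

        spin_lock(&example_lock);  /* no flags to save: IRQs stay off */
        /* ... critical section ... */
        spin_unlock(&example_lock);
}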

diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index df5e6a6..69b892a 100644
@@ -62,7 +62,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
        int lr;
-       unsigned long flags;
+
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
@@ -83,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               spin_lock(&irq->irq_lock);
 
                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -126,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                                vgic_irq_set_phys_active(irq, false);
                }
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 530b849..9c0dd23 100644
@@ -46,7 +46,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;
-       unsigned long flags;
+
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
@@ -75,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;
 
-               spin_lock_irqsave(&irq->irq_lock, flags);
+               spin_lock(&irq->irq_lock);
 
                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -118,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                                vgic_irq_set_phys_active(irq, false);
                }
 
-               spin_unlock_irqrestore(&irq->irq_lock, flags);
+               spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c22cea6..7cfdfbc 100644
@@ -593,10 +593,11 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
-       unsigned long flags;
+
+       DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-       spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+       spin_lock(&vgic_cpu->ap_list_lock);
 
        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -637,7 +638,7 @@ retry:
                /* This interrupt looks like it has to be migrated. */
 
                spin_unlock(&irq->irq_lock);
-               spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+               spin_unlock(&vgic_cpu->ap_list_lock);
 
                /*
                 * Ensure locking order by always locking the smallest
@@ -651,7 +652,7 @@ retry:
                        vcpuB = vcpu;
                }
 
-               spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+               spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);
@@ -676,7 +677,7 @@ retry:
 
                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-               spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+               spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -686,7 +687,7 @@ retry:
                goto retry;
        }
 
-       spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+       spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
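
The vcpuA/vcpuB ordering in the last hunk is the standard
ABBA-deadlock avoidance idiom: whenever two vcpus' ap_list locks must
be held at once, the one with the smaller vcpu_id is taken first, so
concurrent migrations in opposite directions agree on the acquisition
order. A hedged sketch of the idiom (lock_ap_lists_ordered is an
illustrative name, not a kernel function):

/*
 * Mirrors how vgic_prune_ap_list() takes two vcpus' ap_list locks.
 * All CPUs agree on the order (smallest vcpu_id first), so two
 * migrations running in opposite directions cannot deadlock.
 */
static void lock_ap_lists_ordered(struct kvm_vcpu *vcpu1,
                                  struct kvm_vcpu *vcpu2)
{
        struct kvm_vcpu *first, *second;

        if (vcpu1->vcpu_id < vcpu2->vcpu_id) {
                first = vcpu1;
                second = vcpu2;
        } else {
                first = vcpu2;
                second = vcpu1;
        }

        spin_lock(&first->arch.vgic_cpu.ap_list_lock);
        /* _nested tells lockdep the second same-class lock is intentional */
        spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                         SINGLE_DEPTH_NESTING);
}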