irqchip/gic-v4.1: Ensure mutual exclusion between invalidations on the same RD
author		Marc Zyngier <maz@kernel.org>
		Wed, 4 Mar 2020 20:33:12 +0000 (20:33 +0000)
committer	Marc Zyngier <maz@kernel.org>
		Fri, 20 Mar 2020 17:48:21 +0000 (17:48 +0000)
The GICv4.1 spec says that it is CONSTRAINED UNPREDICTABLE to write to
any of the GICR_INV{LPI,ALL}R registers if GICR_SYNCR.Busy == 1.

To deal with this, we must ensure that only a single invalidation can
happen at a time for a given redistributor. Add a per-RD lock to that
effect and take it around the invalidation and the subsequent GICR_SYNCR
read.
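
For context, the pattern applied at each invalidation site boils down to
the following sketch (condensed from the hunks below; gic_data_rdist_cpu(),
gic_write_lpir() and wait_for_syncr() are the existing driver helpers, and
the INVALLR case only differs in the register written):

	/* Serialise the INVLPIR/INVALLR write against the SYNCR poll on this RD */
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVLPIR);
	wait_for_syncr(rdbase);		/* polls GICR_SYNCR.Busy */
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);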

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Link: https://lore.kernel.org/r/20200304203330.4967-6-maz@kernel.org
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
include/linux/irqchip/arm-gic-v3.h

drivers/irqchip/irq-gic-v3-its.c
index c843702..fc57885 100644
@@ -1373,10 +1373,12 @@ static void direct_lpi_inv(struct irq_data *d)
 
        /* Target the redistributor this LPI is currently routed to */
        cpu = irq_to_cpuid_lock(d, &flags);
+       raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
        rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
        gic_write_lpir(val, rdbase + GICR_INVLPIR);
 
        wait_for_syncr(rdbase);
+       raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
        irq_to_cpuid_unlock(d, flags);
 }
 
@@ -3662,9 +3664,11 @@ static void its_vpe_send_inv(struct irq_data *d)
                void __iomem *rdbase;
 
                /* Target the redistributor this VPE is currently known on */
+               raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
                rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
                gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
                wait_for_syncr(rdbase);
+               raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
        } else {
                its_vpe_send_cmd(vpe, its_send_inv);
        }
@@ -3825,10 +3829,12 @@ static void its_vpe_4_1_invall(struct its_vpe *vpe)
        val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
 
        /* Target the redistributor this vPE is currently known on */
+       raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
        rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
        gic_write_lpir(val, rdbase + GICR_INVALLR);
 
        wait_for_syncr(rdbase);
+       raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
 }
 
 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
drivers/irqchip/irq-gic-v3.c
index b6b0f86..0f716c2 100644
@@ -834,6 +834,7 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;
+               raw_spin_lock_init(&gic_data_rdist()->rd_lock);
                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;
 
include/linux/irqchip/arm-gic-v3.h
index c29a026..b28acfa 100644
 
 struct rdists {
        struct {
+               raw_spinlock_t  rd_lock;
                void __iomem    *rd_base;
                struct page     *pend_page;
                phys_addr_t     phys_base;