/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)     "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996     (1ULL << 0)

struct redist_region {
        void __iomem            *redist_base;
        phys_addr_t             phys_base;
        bool                    single_redist;
};

struct gic_chip_data {
        struct fwnode_handle    *fwnode;
        void __iomem            *dist_base;
        struct redist_region    *redist_regions;
        struct rdists           rdists;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     nr_redist_regions;
        u64                     flags;
        bool                    has_rss;
        unsigned int            irq_nr;
        struct partition_desc   *ppi_descs[16];
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);

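/*
 * With the Range Selector (RSS) extension, SGIs can target CPUs whose
 * Aff0 is above 15: Aff0 is split into a 4-bit range selector
 * (bits [7:4], extracted below) and a 4-bit target-list index
 * (bits [3:0]).
 */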
#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
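/*
 * Each redistributor is a pair of 64kB frames: RD_base for the control
 * registers and SGI_base for the SGI/PPI configuration, hence the fixed
 * SZ_64K offset above. GICv4 adds two further frames (VLPI plus a
 * reserved page) after these.
 */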

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE       0xf0

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        return NULL;
}

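/*
 * GICD_CTLR.RWP (Register Write Pending) stays set while the effects of
 * a previous register write are still propagating through the GIC;
 * poll it (with a generous timeout) before relying on the write having
 * taken effect.
 */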
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
                return gic_read_iar_cavium_thunderx();
        else
                return gic_read_iar_common();
}
#endif

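/*
 * Waking a redistributor is a two-step handshake: clear
 * GICR_WAKER.ProcessorSleep, then wait for ChildrenAsleep to read as
 * zero (and the converse when putting it to sleep). Redistributors
 * without power-management support implement GICR_WAKER as RAZ/WI,
 * which is what the !enable path below checks for.
 */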
static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
                return;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (--count) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        }
        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void __iomem *base;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
        gic_mask_irq(d);
        /*
         * When masking a forwarded interrupt, make sure it is
         * deactivated as well.
         *
         * This ensures that an interrupt that is getting
         * disabled/masked will not get "stuck", because there is
         * no one to deactivate it (the guest is being terminated).
         */
        if (irqd_is_forwarded_to_vcpu(d))
                gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
                break;

        case IRQCHIP_STATE_MASKED:
                reg = val ? GICD_ICENABLER : GICD_ISENABLER;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GICD_ISPENDR);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GICD_ISACTIVER);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GICD_ISENABLER);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
        /*
         * No need to deactivate an LPI, or an interrupt that
         * is getting forwarded to a vcpu.
         */
        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
                return;
        gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        /* SPIs have restrictions on the supported types */
        if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
                         type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        return gic_configure_irq(irq, type, base, rwp_wait);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
        if (vcpu)
                irqd_set_forwarded_to_vcpu(d);
        else
                irqd_clr_forwarded_to_vcpu(d);
        return 0;
}

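/*
 * Pack an MPIDR into the GICD_IROUTER affinity format: Aff3 goes in
 * bits [39:32], Aff2 in [23:16], Aff1 in [15:8] and Aff0 in [7:0]. As
 * an illustration, MPIDR Aff3.Aff2.Aff1.Aff0 = 0.1.1.2 becomes
 * 0x0000000000010102.
 */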
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
        u64 aff;

        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

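/*
 * INTID space, for reference: 0-15 are SGIs, 16-31 PPIs, 32-1019 SPIs,
 * 1020-1023 are special (spurious & co), and 8192 upwards are LPIs.
 * The first branch below handles PPIs, SPIs and LPIs; the second deals
 * with SGIs (IPIs).
 */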
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqnr;

        irqnr = gic_read_iar();

        if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
                int err;

                if (static_branch_likely(&supports_deactivate_key))
                        gic_write_eoir(irqnr);
                else
                        isb();

                err = handle_domain_irq(gic_data.domain, irqnr, regs);
                if (err) {
                        WARN_ONCE(true, "Unexpected interrupt received!\n");
                        if (static_branch_likely(&supports_deactivate_key)) {
                                if (irqnr < 8192)
                                        gic_write_dir(irqnr);
                        } else {
                                gic_write_eoir(irqnr);
                        }
                }
                return;
        }
        if (irqnr < 16) {
                gic_write_eoir(irqnr);
                if (static_branch_likely(&supports_deactivate_key))
                        gic_write_dir(irqnr);
#ifdef CONFIG_SMP
                /*
                 * Unlike GICv2, we don't need an smp_rmb() here.
                 * The control dependency from gic_read_iar to
                 * the ISB in gic_write_eoir is enough to ensure
                 * that any shared data read by handle_IPI will
                 * be read after the ACK.
                 */
                handle_IPI(irqnr, regs);
#else
                WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
        }
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Configure SPIs as non-secure Group-1. This will only matter
         * if the GIC only has a single security state. This will not
         * do the right thing if the kernel is running in secure mode,
         * but that's not the intended use case anyway.
         */
        for (i = 32; i < gic_data.irq_nr; i += 32)
                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
        int ret = -ENODEV;
        int i;

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u64 typer;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = gic_read_typer(ptr + GICR_TYPER);
                        ret = fn(gic_data.redist_regions + i, ptr);
                        if (!ret)
                                return 0;

                        if (gic_data.redist_regions[i].single_redist)
                                break;

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
        unsigned long mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;

                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;

                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
                        smp_processor_id(), mpidr,
                        (int)(region - gic_data.redist_regions),
                        &gic_data_rdist()->phys_base);
                return 0;
        }

        /* Try next one */
        return 1;
}

static int gic_populate_rdist(void)
{
        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
                return 0;

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
             smp_processor_id(),
             (unsigned long)cpu_logical_map(smp_processor_id()));
        return -ENODEV;
}

static int __gic_update_vlpi_properties(struct redist_region *region,
                                        void __iomem *ptr)
{
        u64 typer = gic_read_typer(ptr + GICR_TYPER);

        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
        gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);

        return 1;
}

static void gic_update_vlpi_properties(void)
{
        gic_iterate_rdists(__gic_update_vlpi_properties);
        pr_info("%sVLPI support, %sdirect LPI support\n",
                !gic_data.rdists.has_vlpis ? "no " : "",
                !gic_data.rdists.has_direct_lpi ? "no " : "");
}

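/*
 * Per-CPU setup of the CPU interface system registers: enable
 * ICC_SRE_EL1.SRE, program the priority mask and binary point, pick an
 * EOI mode, clear the active priority registers, and finally enable
 * Group1 interrupts.
 */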
static void gic_cpu_sys_reg_init(void)
{
        int i, cpu = smp_processor_id();
        u64 mpidr = cpu_logical_map(cpu);
        u64 need_rss = MPIDR_RS(mpidr);
        bool group0;
        u32 val, pribits;

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        if (!gic_enable_sre())
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

        pribits = gic_read_ctlr();
        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
        pribits++;

        /*
         * Let's find out if Group0 is under control of EL3 or not by
         * setting the highest possible, non-zero priority in PMR.
         *
         * If SCR_EL3.FIQ is set, the priority gets shifted down in
         * order for the CPU interface to set bit 7, and keep the
         * actual priority in the non-secure range. In the process, it
         * loses the least significant bit and the actual priority
         * becomes 0x80. Reading it back returns 0, indicating that
         * we don't have access to Group0.
         */
        write_gicreg(BIT(8 - pribits), ICC_PMR_EL1);
        val = read_gicreg(ICC_PMR_EL1);
        group0 = val != 0;

        /* Set priority mask register */
        write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);

        /*
         * Some firmware hands over to the kernel with the BPR changed from
         * its reset value (and with a value large enough to prevent
         * any pre-emptive interrupts from working at all). Writing a zero
         * to BPR restores its reset value.
         */
        gic_write_bpr1(0);

        if (static_branch_likely(&supports_deactivate_key)) {
                /* EOI drops priority only (mode 1) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
        } else {
                /* EOI deactivates interrupt too (mode 0) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
        }

        /* Always whack Group0 before Group1 */
        if (group0) {
                switch(pribits) {
                case 8:
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
                        /* Fall through */
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
                        /* Fall through */
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
                }

                isb();
        }

        switch(pribits) {
        case 8:
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
                /* Fall through */
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
                /* Fall through */
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
        }

        isb();

        /* ... and let's hit the road... */
        gic_write_grpen1(1);

        /* Keep the RSS capability status in per_cpu variable */
        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

        /* Check that all the CPUs are capable of sending SGIs to other CPUs */
        for_each_online_cpu(i) {
                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

                need_rss |= MPIDR_RS(cpu_logical_map(i));
                if (need_rss && (!have_rss))
                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                                cpu, (unsigned long)mpidr,
                                i, (unsigned long)cpu_logical_map(i));
        }

        /*
         * The GIC spec says that, when ICC_CTLR_EL1.RSS==1 and
         * GICD_TYPER.RSS==0, writing the ICC_ASGI1R_EL1 register with
         * RS != 0 is a CONSTRAINED UNPREDICTABLE choice of:
         *   - The write is ignored.
         *   - The RS field is treated as 0.
         */
        if (need_rss && (!gic_data.has_rss))
                pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
        return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
                !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        rbase = gic_data_rdist_sgi_base();

        /* Configure SGIs/PPIs as non-secure Group-1 */
        writel_relaxed(~0, rbase + GICR_IGROUPR0);

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
        gic_cpu_init();

        if (gic_dist_supports_lpis())
                its_cpu_init();

        return 0;
}

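/*
 * Build the 16-bit target list for one SGI write: starting from
 * *base_cpu, accumulate every CPU in @mask that lives in @cluster_id
 * (i.e. shares MPIDR affinity levels 1-3 and the same range selector),
 * one bit per Aff0 value. *base_cpu is advanced to the last CPU
 * consumed, so the caller can iterate cluster by cluster.
 */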
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
{
        int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                tlist |= 1 << (mpidr & 0xf);

                next_cpu = cpumask_next(cpu, mask);
                if (next_cpu >= nr_cpu_ids)
                        goto out;
                cpu = next_cpu;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

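/*
 * Assemble an ICC_SGI1R_EL1 value: per the GICv3 spec, TargetList lives
 * in bits [15:0], Aff1 in [23:16], the SGI number in [27:24], Aff2 in
 * [39:32], the range selector in [47:44] and Aff3 in [55:48] (IRM is
 * left at 0, i.e. route by affinity).
 */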
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
               irq << ICC_SGI1R_SGI_ID_SHIFT            |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
               MPIDR_TO_SGI_RS(cluster_id)              |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        wmb();

        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                  "irqchip/arm/gicv3:starting",
                                  gic_starting_cpu, NULL);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        void __iomem *reg;
        int enabled;
        u64 val;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        gic_write_irouter(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity        NULL
#define gic_smp_init()          do { } while(0)
#endif

#ifdef CONFIG_CPU_PM
/* Check whether it's single security state view */
static bool gic_dist_security_disabled(void)
{
        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                if (gic_dist_security_disabled())
                        gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_eoimode1_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoimode1_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

#define GIC_ID_NR       (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))

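/*
 * Map a hwirq to a Linux interrupt. The hwirq number space mirrors the
 * INTID space: 16-31 are PPIs (percpu), 32 up to gic_data.irq_nr are
 * SPIs, and 8192 up to GIC_ID_NR are LPIs; everything else is either
 * reserved or owned by the core kernel (SGIs).
 */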
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct irq_chip *chip = &gic_chip;

        if (static_branch_likely(&supports_deactivate_key))
                chip = &gic_eoimode1_chip;

        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* Nothing here */
        if (hw >= gic_data.irq_nr && hw < 8192)
                return -EPERM;
        /* Off limits */
        if (hw >= GIC_ID_NR)
                return -EPERM;

        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                irq_set_status_flags(irq, IRQ_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irq_set_probe(irq);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
        }
        /* LPIs */
        if (hw >= 8192 && hw < GIC_ID_NR) {
                if (!gic_dist_supports_lpis())
                        return -EPERM;
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
        }

        return 0;
}

#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)

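/*
 * Translate a firmware interrupt specifier. For DT, this follows the
 * standard three-cell GIC binding: cell 0 is the interrupt type
 * (0 = SPI, 1 = PPI), cell 1 the interrupt number within that space,
 * and cell 2 the trigger flags. LPI and partition specifiers reuse the
 * same layout with a magic type value in cell 0.
 */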
static int gic_irq_domain_translate(struct irq_domain *d,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count < 3)
                        return -EINVAL;

                switch (fwspec->param[0]) {
                case 0:                 /* SPI */
                        *hwirq = fwspec->param[1] + 32;
                        break;
                case 1:                 /* PPI */
                case GIC_IRQ_TYPE_PARTITION:
                        *hwirq = fwspec->param[1] + 16;
                        break;
                case GIC_IRQ_TYPE_LPI:  /* LPI */
                        *hwirq = fwspec->param[1];
                        break;
                default:
                        return -EINVAL;
                }

                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

                /*
                 * Make it clear that broken DTs are... broken.
                 * Partitioned PPIs are an unfortunate exception.
                 */
                WARN_ON(*type == IRQ_TYPE_NONE &&
                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
                return 0;
        }

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                if (fwspec->param_count != 2)
                        return -EINVAL;

                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];

                WARN_ON(*type == IRQ_TYPE_NONE);
                return 0;
        }

        return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;

        ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static int gic_irq_domain_select(struct irq_domain *d,
                                 struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
{
        /* Not for us */
        if (fwspec->fwnode != d->fwnode)
                return 0;

        /* If this is not DT, then we have a single domain */
        if (!is_of_node(fwspec->fwnode))
                return 1;

        /*
         * If this is a PPI and we have a 4th (non-null) parameter,
         * then we need to match the partition domain.
         */
        if (fwspec->param_count >= 4 &&
            fwspec->param[0] == 1 && fwspec->param[3] != 0)
                return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

        return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .translate = gic_irq_domain_translate,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
                                      struct irq_fwspec *fwspec,
                                      unsigned long *hwirq,
                                      unsigned int *type)
{
        struct device_node *np;
        int ret;

        np = of_find_node_by_phandle(fwspec->param[3]);
        if (WARN_ON(!np))
                return -EINVAL;

        ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
                                     of_node_to_fwnode(np));
        if (ret < 0)
                return ret;

        *hwirq = ret;
        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
        .translate = partition_domain_translate,
        .select = gic_irq_domain_select,
};

static bool gic_enable_quirk_msm8996(void *data)
{
        struct gic_chip_data *d = data;

        d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

        return true;
}

static int __init gic_init_bases(void __iomem *dist_base,
                                 struct redist_region *rdist_regs,
                                 u32 nr_redist_regions,
                                 u64 redist_stride,
                                 struct fwnode_handle *handle)
{
        u32 typer;
        int gic_irqs;
        int err;

        if (!is_hyp_mode_available())
                static_branch_disable(&supports_deactivate_key);

        if (static_branch_likely(&supports_deactivate_key))
                pr_info("GIC: Using split EOI/Deactivate mode\n");

        gic_data.fwnode = handle;
        gic_data.dist_base = dist_base;
        gic_data.redist_regions = rdist_regs;
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
        gic_data.rdists.gicd_typer = typer;
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic_data.irq_nr = gic_irqs;

        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
        gic_data.rdists.has_vlpis = true;
        gic_data.rdists.has_direct_lpi = true;

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
        pr_info("Distributor has %sRange Selector support\n",
                gic_data.has_rss ? "" : "no ");

        if (typer & GICD_TYPER_MBIS) {
                err = mbi_init(handle, gic_data.domain);
                if (err)
                        pr_err("Failed to initialize MBIs\n");
        }

        set_handle_irq(gic_handle_irq);

        gic_update_vlpi_properties();

        gic_smp_init();
        gic_dist_init();
        gic_cpu_init();
        gic_cpu_pm_init();

        if (gic_dist_supports_lpis()) {
                its_init(handle, &gic_data.rdists, gic_data.domain);
                its_cpu_init();
        }

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdists.rdist);
        return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
        u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
                return -ENODEV;

        return 0;
}

/* Create all possible partitions at boot time */
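/*
 * A hypothetical DT fragment, to illustrate the shape this parser
 * expects (following the arm,gic-v3 "ppi-partitions" binding; node and
 * label names below are made up):
 *
 *      ppi-partitions {
 *              part0: interrupt-partition-0 {
 *                      affinity = <&cpu0 &cpu2>;
 *              };
 *              part1: interrupt-partition-1 {
 *                      affinity = <&cpu1 &cpu3>;
 *              };
 *      };
 */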
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
        struct device_node *parts_node, *child_part;
        int part_idx = 0, i;
        int nr_parts;
        struct partition_affinity *parts;

        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;

        nr_parts = of_get_child_count(parts_node);

        if (!nr_parts)
                goto out_put_node;

        parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (WARN_ON(!parts))
                goto out_put_node;

        for_each_child_of_node(parts_node, child_part) {
                struct partition_affinity *part;
                int n;

                part = &parts[part_idx];

                part->partition_id = of_node_to_fwnode(child_part);

                pr_info("GIC: PPI partition %pOFn[%d] { ",
                        child_part, part_idx);

                n = of_property_count_elems_of_size(child_part, "affinity",
                                                    sizeof(u32));
                WARN_ON(n <= 0);

                for (i = 0; i < n; i++) {
                        int err, cpu;
                        u32 cpu_phandle;
                        struct device_node *cpu_node;

                        err = of_property_read_u32_index(child_part, "affinity",
                                                         i, &cpu_phandle);
                        if (WARN_ON(err))
                                continue;

                        cpu_node = of_find_node_by_phandle(cpu_phandle);
                        if (WARN_ON(!cpu_node))
                                continue;

                        cpu = of_cpu_node_to_id(cpu_node);
                        if (WARN_ON(cpu < 0))
                                continue;

                        pr_cont("%pOF[%d] ", cpu_node, cpu);

                        cpumask_set_cpu(cpu, &part->mask);
                }

                pr_cont("}\n");
                part_idx++;
        }

        for (i = 0; i < 16; i++) {
                unsigned int irq;
                struct partition_desc *desc;
                struct irq_fwspec ppi_fwspec = {
                        .fwnode         = gic_data.fwnode,
                        .param_count    = 3,
                        .param          = {
                                [0]     = GIC_IRQ_TYPE_PARTITION,
                                [1]     = i,
                                [2]     = IRQ_TYPE_NONE,
                        },
                };

                irq = irq_create_fwspec_mapping(&ppi_fwspec);
                if (WARN_ON(!irq))
                        continue;
                desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
                                             irq, &partition_domain_ops);
                if (WARN_ON(!desc))
                        continue;

                gic_data.ppi_descs[i] = desc;
        }

out_put_node:
        of_node_put(parts_node);
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
        int ret;
        struct resource r;
        u32 gicv_idx;

        gic_v3_kvm_info.type = GIC_V3;

        gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
        if (!gic_v3_kvm_info.maint_irq)
                return;

        if (of_property_read_u32(node, "#redistributor-regions",
                                 &gicv_idx))
                gicv_idx = 1;

        gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
        ret = of_address_to_resource(node, gicv_idx, &r);
        if (!ret)
                gic_v3_kvm_info.vcpu = r;

        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
        gic_set_kvm_info(&gic_v3_kvm_info);
}

static const struct gic_quirk gic_quirks[] = {
        {
                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
                .compatible = "qcom,msm8996-gic-v3",
                .init   = gic_enable_quirk_msm8996,
        },
        {
        }
};

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        struct redist_region *rdist_regs;
        u64 redist_stride;
        u32 nr_redist_regions;
        int err, i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%pOF: unable to map gic dist registers\n", node);
                return -ENXIO;
        }

        err = gic_validate_dist_version(dist_base);
        if (err) {
                pr_err("%pOF: no distributor detected, giving up\n", node);
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
                nr_redist_regions = 1;

        rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
                             GFP_KERNEL);
        if (!rdist_regs) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < nr_redist_regions; i++) {
                struct resource res;
                int ret;

                ret = of_address_to_resource(node, 1 + i, &res);
                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
                if (ret || !rdist_regs[i].redist_base) {
                        pr_err("%pOF: couldn't map region %d\n", node, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
                rdist_regs[i].phys_base = res.start;
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_enable_of_quirks(node, gic_quirks, &gic_data);

        err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
                             redist_stride, &node->fwnode);
        if (err)
                goto out_unmap_rdist;

        gic_populate_ppi_partitions(node);

        if (static_branch_likely(&supports_deactivate_key))
                gic_of_setup_kvm_info(node);
        return 0;

out_unmap_rdist:
        for (i = 0; i < nr_redist_regions; i++)
                if (rdist_regs[i].redist_base)
                        iounmap(rdist_regs[i].redist_base);
        kfree(rdist_regs);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
        void __iomem *dist_base;
        struct redist_region *redist_regs;
        u32 nr_redist_regions;
        bool single_redist;
        u32 maint_irq;
        int maint_irq_mode;
        phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
        static int count = 0;

        acpi_data.redist_regs[count].phys_base = phys_base;
        acpi_data.redist_regs[count].redist_base = redist_base;
        acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
        count++;
}

static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
                           const unsigned long end)
{
        struct acpi_madt_generic_redistributor *redist =
                        (struct acpi_madt_generic_redistributor *)header;
        void __iomem *redist_base;

        redist_base = ioremap(redist->base_address, redist->length);
        if (!redist_base) {
                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
                return -ENOMEM;
        }

        gic_acpi_register_redist(redist->base_address, redist_base);
        return 0;
}

static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
                         const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                                (struct acpi_madt_generic_interrupt *)header;
        u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
        u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
        void __iomem *redist_base;

        /* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        redist_base = ioremap(gicc->gicr_base_address, size);
        if (!redist_base)
                return -ENOMEM;

        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
        return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
        acpi_tbl_entry_handler redist_parser;
        enum acpi_madt_type type;

        if (acpi_data.single_redist) {
                type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
                redist_parser = gic_acpi_parse_madt_gicc;
        } else {
                type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
                redist_parser = gic_acpi_parse_madt_redist;
        }

        /* Collect redistributor base addresses in GICR entries */
        if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
                return 0;

        pr_info("No valid GICR entries exist\n");
        return -ENODEV;
}

static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
                                      const unsigned long end)
{
        /* Subtable presence means that redist exists, that's it */
        return 0;
}

static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
                                      const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                                (struct acpi_madt_generic_interrupt *)header;

        /*
         * If the GICC entry is enabled and has a valid GICR base address,
         * then the GICR base is presented via GICC.
         */
        if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
                return 0;

        /*
         * It's perfectly valid for firmware to pass a disabled GICC
         * entry; the driver should not treat that as an error, so skip
         * the entry instead of failing the probe.
         */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
        int count;

        /*
         * Count how many redistributor regions we have. Mixing the two
         * redistributor descriptions is not allowed: GICR and GICC
         * subtables are mutually exclusive.
         */
        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
                                      gic_acpi_match_gicr, 0);
        if (count > 0) {
                acpi_data.single_redist = false;
                return count;
        }

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      gic_acpi_match_gicc, 0);
        if (count > 0)
                acpi_data.single_redist = true;

        return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
                                           struct acpi_probe_entry *ape)
{
        struct acpi_madt_generic_distributor *dist;
        int count;

        dist = (struct acpi_madt_generic_distributor *)header;
        if (dist->version != ape->driver_data)
                return false;

        /* We need to do that exercise anyway, the sooner the better */
        count = gic_acpi_count_gicr_regions();
        if (count <= 0)
                return false;

        acpi_data.nr_redist_regions = count;
        return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
                                                const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                (struct acpi_madt_generic_interrupt *)header;
        int maint_irq_mode;
        static int first_madt = true;

        /* Skip unusable CPUs */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
                ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

        if (first_madt) {
                first_madt = false;

                acpi_data.maint_irq = gicc->vgic_interrupt;
                acpi_data.maint_irq_mode = maint_irq_mode;
                acpi_data.vcpu_base = gicc->gicv_base_address;

                return 0;
        }

        /*
         * The maintenance interrupt and GICV should be the same for every CPU
         */
        if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
            (acpi_data.maint_irq_mode != maint_irq_mode) ||
            (acpi_data.vcpu_base != gicc->gicv_base_address))
                return -EINVAL;

        return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
        int count;

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      gic_acpi_parse_virt_madt_gicc, 0);

        return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE        (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
        int irq;

        if (!gic_acpi_collect_virt_info()) {
                pr_warn("Unable to get hardware information used for virtualization\n");
                return;
        }

        gic_v3_kvm_info.type = GIC_V3;

        irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
                                acpi_data.maint_irq_mode,
                                ACPI_ACTIVE_HIGH);
        if (irq <= 0)
                return;

        gic_v3_kvm_info.maint_irq = irq;

        if (acpi_data.vcpu_base) {
                struct resource *vcpu = &gic_v3_kvm_info.vcpu;

                vcpu->flags = IORESOURCE_MEM;
                vcpu->start = acpi_data.vcpu_base;
                vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
        }

        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
        gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
        struct acpi_madt_generic_distributor *dist;
        struct fwnode_handle *domain_handle;
        size_t size;
        int i, err;

        /* Get distributor base address */
        dist = (struct acpi_madt_generic_distributor *)header;
        acpi_data.dist_base = ioremap(dist->base_address,
                                      ACPI_GICV3_DIST_MEM_SIZE);
        if (!acpi_data.dist_base) {
                pr_err("Unable to map GICD registers\n");
                return -ENOMEM;
        }

        err = gic_validate_dist_version(acpi_data.dist_base);
        if (err) {
                pr_err("No distributor detected at @%p, giving up\n",
                       acpi_data.dist_base);
                goto out_dist_unmap;
        }

        size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
        acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
        if (!acpi_data.redist_regs) {
                err = -ENOMEM;
                goto out_dist_unmap;
        }

        err = gic_acpi_collect_gicr_base();
        if (err)
                goto out_redist_unmap;

        domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
        if (!domain_handle) {
                err = -ENOMEM;
                goto out_redist_unmap;
        }

        err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
                             acpi_data.nr_redist_regions, 0, domain_handle);
        if (err)
                goto out_fwhandle_free;

        acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

        if (static_branch_likely(&supports_deactivate_key))
                gic_acpi_setup_kvm_info();

        return 0;

out_fwhandle_free:
        irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
        for (i = 0; i < acpi_data.nr_redist_regions; i++)
                if (acpi_data.redist_regs[i].redist_base)
                        iounmap(acpi_data.redist_regs[i].redist_base);
        kfree(acpi_data.redist_regs);
out_dist_unmap:
        iounmap(acpi_data.dist_base);
        return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
                     gic_acpi_init);
#endif