/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU.  The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
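/*
 * Quick reference for the distributor's per-interrupt register packing,
 * which all of the (gic_irq / N) arithmetic below relies on:
 *
 *   enable set/clear:  1 bit  per interrupt, 32 per 32-bit register
 *   config:            2 bits per interrupt, 16 per register
 *   priority/target:   8 bits per interrupt,  4 per register
 *
 * For example, hwirq 33 is bit 1 of the second enable register:
 * word offset = (33 / 32) * 4 = 4, bit = 33 % 32 = 1.
 */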
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"
union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
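/*
 * Each entry holds the GIC CPU interface mask (one bit per interface)
 * for that logical CPU.  gic_cpu_init() fills in the entry for the
 * booting CPU, and gic_set_affinity() uses it to translate a logical
 * CPU number into the byte written to GIC_DIST_TARGET.
 */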
/*
 * Supported arch specific GIC irq extension.
 * All hooks default to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};
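/*
 * Platform code may fill in these hooks before the GIC is initialized;
 * when set, they are called from the matching gic_* operations below,
 * under irq_controller_lock, to mirror each operation into an external
 * (e.g. wakeup-capable) interrupt controller.
 */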
#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
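/*
 * Note: for the fasteoi flow used below, gic_eoi_irq() runs once after
 * the handler; writing the interrupt ID to GIC_CPU_EOI drops it from
 * the active state so lower-priority interrupts can be taken again.
 */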
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration.
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
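/*
 * Worked example of the config-register arithmetic above: for hwirq 35,
 * confoff = (35 / 16) * 4 = 8 (the third GIC_DIST_CONFIG word) and
 * confmask = 0x2 << ((35 % 16) * 2) = 0x2 << 6, i.e. bit 7 - the
 * edge/level bit of that interrupt's 2-bit field.
 */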
static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 when we cannot retrigger in hardware */
	return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
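/*
 * GIC_DIST_TARGET holds one byte per interrupt, so (gic_irq(d) & ~3)
 * selects the word and (gic_irq(d) % 4) * 8 the byte lane within it.
 * For hwirq 42 that is the word at offset 40, byte lane 2 (shift 16).
 */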
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
#else
#define gic_set_wake	NULL
#endif
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
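/*
 * GIC_CPU_INTACK decoding, as used above: bits [9:0] carry the
 * interrupt ID (1023 = spurious, which terminates the loop) and, for
 * SGIs, bits [12:10] carry the requesting CPU.  That is why SGIs must
 * EOI the full irqstat value rather than the masked interrupt number.
 */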
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
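/*
 * This is the flow handler installed on the parent interrupt when a
 * secondary GIC feeds into the primary one (see gic_cascade_irq());
 * only SPIs (32-1020) are valid on a cascaded GIC, hence the range
 * check before dispatching.
 */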
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
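/*
 * This works because the GIC_DIST_TARGET registers for interrupts 0-31
 * are read-only banked registers: each byte reads back as the CPU
 * interface mask of the CPU doing the read, so folding the four bytes
 * of the first non-zero word together yields "our" interface bit.
 */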
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
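/*
 * On the priority values used here: lower numbers mean higher priority,
 * and the CPU interface only forwards interrupts whose priority is
 * numerically lower than GIC_CPU_PRIMASK.  0xa0 against a mask of 0xf0
 * therefore lets every configured interrupt through while leaving
 * headroom for more urgent sources.
 */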
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
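/*
 * Note that priorities are not saved: the restore path simply rewrites
 * the same 0xa0a0a0a0 default that gic_dist_init() programs, so any
 * runtime priority changes are lost across a cluster power-down.
 */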
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
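/*
 * The PPI/SGI state is allocated per-CPU because those registers are
 * banked: DIV_ROUND_UP(32, 32) * 4 = 4 bytes of enable bits and
 * DIV_ROUND_UP(32, 16) * 4 = 8 bytes of config per CPU, matching what
 * gic_cpu_save() and gic_cpu_restore() access through __this_cpu_ptr().
 */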
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
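/*
 * GIC_DIST_SOFTINT layout, as used above: bits [23:16] are the target
 * list (one bit per CPU interface) and bits [3:0] the SGI number.
 * Raising SGI 1 on interfaces 0 and 2, for instance, writes
 * (0x5 << 16) | 1.
 */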
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
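/*
 * The three cells are <type number flags>, with type 0 = SPI and
 * type 1 = PPI.  So the device tree specifier <0 29 4> names SPI 29,
 * level-high, which the code above turns into hwirq 29 + 16 + 16 = 61,
 * and <1 13 4> names PPI 13, hwirq 13 + 16 = 29.
 */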
const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		irq_base = irq_start;
	}
	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					    hwirq_base, &gic_irq_domain_ops, gic);
	if (WARN_ON(!gic->domain))
		return;

#ifdef CONFIG_SMP
	set_smp_cross_call(gic_raise_softirq);
#endif

	set_handle_irq(gic_handle_irq);

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
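/*
 * Board files that don't use the device tree typically reach this
 * through the gic_init() wrapper in <linux/irqchip/arm-gic.h>, which
 * passes percpu_offset = 0 and node = NULL, e.g.:
 *
 *	gic_init(0, 29, dist_base, cpu_base);
 */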
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
#ifdef CONFIG_OF
static int gic_cnt __initdata;

int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif
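/*
 * A matching device tree node, per the standard GIC binding, might be:
 *
 *	intc: interrupt-controller@fff11000 {
 *		compatible = "arm,cortex-a9-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0xfff11000 0x1000>,	// distributor (of_iomap index 0)
 *		      <0xfff10100 0x100>;	// CPU interface (index 1)
 *	};
 *
 * The addresses here are only illustrative.
 */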