/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct irq_cfg *legacy_irq_cfgs[NR_IRQS_LEGACY];
#endif
void lock_vector_lock(void)
{
	/*
	 * Used to ensure that the set of online cpus does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
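/*
 * Note (added for clarity): in the hierarchical irqdomain setup the
 * vector domain is the root of the hierarchy, so walking ->parent_data
 * to the bottom of the chain finds the irq_data whose chip_data holds
 * the struct irq_cfg, regardless of which stacked domain the caller
 * started from.
 */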
static struct irq_cfg *alloc_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}
struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}
static void free_irq_cfg(struct irq_cfg *cfg)
{
	if (cfg) {
		free_cpumask_var(cfg->domain);
		free_cpumask_var(cfg->old_domain);
		kfree(cfg);
	}
}
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] >
			    VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	if (!err) {
		/* cache destination APIC IDs into cfg->dest_apicid */
		err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
						   &cfg->dest_apicid);
	}

	return err;
}
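/*
 * Illustrative note (added, not from the original source): the local APIC
 * groups vectors into 16 priority levels (level = vector >> 4). Starting
 * from FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START, the search above
 * steps by 16 (e.g. 0x31, 0x41, 0x51, ...), wrapping to the next offset
 * (0x32, 0x42, ...) once first_system_vector is reached, so successive
 * allocations land in different priority levels. Reserved entries such
 * as the 0x80 syscall gate are skipped via the used_vectors bitmap.
 */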
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}

	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
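/*
 * Note (added for clarity): while a vector move is in progress the irq
 * still owns a per-cpu vector_irq slot on each cpu in cfg->old_domain,
 * which is why clear_irq_vector() above must also scan the old domain
 * and invalidate those stale entries before dropping the lock.
 */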
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static inline const struct cpumask *
irq_alloc_info_get_mask(struct irq_alloc_info *info)
{
	return (!info || !info->mask) ? apic->target_cpus() : info->mask;
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			clear_irq_vector(virq + i, irq_data->chip_data);
			free_irq_cfg(irq_data->chip_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_cfgs[virq + i] = NULL;
#endif
			irq_domain_reset_irq_data(irq_data);
		}
	}
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	const struct cpumask *mask;
	struct irq_data *irq_data;
	struct irq_cfg *cfg;
	int i, err;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	mask = irq_alloc_info_get_mask(info);
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_cfgs[virq + i])
			cfg = legacy_irq_cfgs[virq + i];
		else
#endif
			cfg = alloc_irq_cfg(irq_data->node);
		if (!cfg) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = cfg;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector(virq + i, cfg, mask);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}
static struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};
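/*
 * Illustrative sketch (added, not part of the original file): a stacked
 * child domain, e.g. the MSI irqdomain, reaches these callbacks through
 * the generic hierarchy helpers, roughly:
 *
 *	err = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
 *
 * which ends up in x86_vector_alloc_irqs() above with the
 * struct irq_alloc_info passed through as the opaque arg.
 */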
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return nr_legacy_irqs();
}
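/*
 * Worked example (added, illustrative): with gsi_top = 40, 16 legacy
 * irqs and 8 possible cpus, nr = (40 + 16) + 8 * 8 = 120; since
 * gsi_top > NR_IRQS_LEGACY another 40 * 16 = 640 is reserved for
 * dynamic MSI/HT irqs, so nr_irqs is clamped to 760.
 */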
#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct irq_cfg *cfg;

	/*
	 * For legacy IRQ's, start with assigning irq0 to irq15 to
	 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		cfg = legacy_irq_cfgs[i] = alloc_irq_cfg(node);
		BUG_ON(!cfg);
		cfg->vector = IRQ0_VECTOR + i;
		cpumask_setall(cfg->domain);
		irq_set_chip_data(i, cfg);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
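/*
 * Note (added for clarity): IRQ0_VECTOR..IRQ15_VECTOR are the fixed
 * vectors reserved for the i8259 lines, e.g. the timer irq0 always uses
 * IRQ0_VECTOR on every cpu, which is why cfg->domain is set to all cpus
 * above instead of being allocated dynamically.
 */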
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	return arch_early_ioapic_init();
}
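/*
 * Note (added for clarity): the vector domain created above is the root
 * of the irqdomain hierarchy; the MSI, HT and IOAPIC domains are stacked
 * on top of it and delegate the actual CPU vector management to this
 * domain.
 */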
static void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}
/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

	__setup_vector_irq(cpu);
}
int apic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
/*
 * Either sets data->affinity to a valid value and returns the
 * ->cpu_mask_to_apicid of that in dest_id, or returns an error and
 * leaves data->affinity untouched.
 */
int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      unsigned int *dest_id)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}
static int vector_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *dest, bool force)
{
	struct irq_cfg *cfg = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, dest);
	if (err) {
		struct irq_data *top = irq_get_irq_data(irq);

		if (assign_irq_vector(irq, cfg, top->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	return IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= vector_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
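/*
 * Note (added for clarity): this is the irq_chip installed on every irq
 * allocated from the vector domain (see x86_vector_alloc_irqs() above);
 * chips of stacked domains typically forward their callbacks down to
 * these handlers.
 */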
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered in the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
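/*
 * Note (added for clarity): the migration handshake works in two steps.
 * First __assign_irq_vector() programs the new vector/cpu and records
 * the previous cpus in cfg->old_domain; once the first interrupt arrives
 * on the new vector, __irq_complete_move() below sends
 * IRQ_MOVE_CLEANUP_VECTOR to the old cpus, whose handler above releases
 * the stale vector_irq slots, deferring via a self-IPI if the old vector
 * is still pending in the IRR.
 */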
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
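/*
 * Usage note (added): boot with "show_lapic=<n>" to dump the local APIC
 * state of the first <n> online cpus, or "show_lapic=all" to cover every
 * cpu; the default dumps cpu 0 only.
 */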
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);