
Merge branches 'cpus4096', 'x86/cleanups' and 'x86/urgent' into x86/percpu
author Ingo Molnar <mingo@elte.hu>
Thu, 15 Jan 2009 12:18:57 +0000 (13:18 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 15 Jan 2009 12:18:57 +0000 (13:18 +0100)
92 files changed:
Documentation/cputopology.txt
arch/alpha/kernel/irq.c
arch/arm/kernel/irq.c
arch/arm/oprofile/op_model_mpcore.c
arch/blackfin/kernel/irqchip.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/sn/kernel/msi_sn.c
arch/mips/include/asm/irq.h
arch/mips/kernel/irq-gic.c
arch/mips/kernel/smtc.c
arch/mips/mti-malta/malta-smtc.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/smp.c
arch/mn10300/kernel/mn10300-watchdog.c
arch/parisc/kernel/irq.c
arch/powerpc/kernel/irq.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/mpic.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/time_64.c
arch/x86/include/asm/apicnum.h [new file with mode: 0644]
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpumask.h [new file with mode: 0644]
arch/x86/include/asm/hardirq_32.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/mpspec_def.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/crash.c
arch/x86/kernel/io_apic.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/microcode_intel.c
arch/x86/kernel/module_32.c
arch/x86/kernel/module_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/msr.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tlb_32.c
arch/x86/kernel/tlb_64.c
arch/x86/kernel/tlb_uv.c
arch/x86/mach-voyager/setup.c
arch/x86/mm/init_32.c
arch/x86/mm/pat.c
arch/x86/xen/enlighten.c
drivers/base/cpu.c
drivers/base/topology.c
drivers/firmware/dcdbas.c
drivers/misc/sgi-xp/xpc_main.c
drivers/net/sfc/efx.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/buffer_sync.h
drivers/oprofile/oprof.c
drivers/pci/intr_remapping.c
drivers/xen/events.c
drivers/xen/manage.c
include/asm-generic/bitops/__ffs.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/asm-generic/bitops/fls64.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqnr.h
include/linux/topology.h
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/numa_migrate.c
kernel/irq/proc.c
kernel/sched_rt.c
kernel/softirq.c
lib/smp_processor_id.c

index 45932ec..b41f3e5 100644 (file)
@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)
 
 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
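
As a sketch of how the renamed accessors are consumed (show_core_siblings(), buf and len are hypothetical; cpulist_scnprintf() is the same formatting helper drivers/base/topology.c uses):

    /* The accessor now returns a (const) struct cpumask *, so it can
     * be handed straight to the cpumask_* / cpulist_* helpers. */
    static ssize_t show_core_siblings(unsigned int cpu, char *buf, size_t len)
    {
            const struct cpumask *mask = topology_core_cpumask(cpu);

            return cpulist_scnprintf(buf, len, mask);   /* e.g. "0-3" */
    }
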
index 703731a..7bc7489 100644 (file)
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
        return 0;
 }
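
This alpha hunk shows the conversion repeated across the whole merge: struct assignment of a cpumask_t becomes cpumask_copy() onto a struct cpumask *, which keeps working when ->affinity is later allocated off-stack. A minimal sketch of the two idioms (pin_affinity() is a hypothetical helper, not a kernel symbol):

    #include <linux/cpumask.h>

    /* Route an affinity mask to a single CPU. */
    static void pin_affinity(struct cpumask *affinity, int cpu)
    {
            /* The old idiom copied a full NR_CPUS-bit cpumask_t by value:
             *         *affinity = cpumask_of_cpu(cpu);
             * cpumask_copy() copies only nr_cpu_ids bits and also works
             * when the destination is an allocated cpumask_var_t. */
            cpumask_copy(affinity, cpumask_of(cpu));
    }
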
index 7141cee..4bb723e 100644 (file)
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
        .lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
+       cpumask_setall(bad_irq_desc.affinity);
        bad_irq_desc.cpu = smp_processor_id();
 #endif
        init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
                struct irq_desc *desc = irq_desc + i;
 
                if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
-
-                       if (newcpu == NR_CPUS) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
 
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
 
                        route_irq(desc, i, newcpu);
index 6d6bd58..853d42b 100644 (file)
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
        const struct cpumask *mask = cpumask_of(cpu);
 
        spin_lock_irq(&desc->lock);
-       desc->affinity = *mask;
+       cpumask_copy(desc->affinity, mask);
        desc->chip->set_affinity(irq, mask);
        spin_unlock_irq(&desc->lock);
 }
index ab8209c..5780d6d 100644 (file)
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
        int i = *(loff_t *) v, j;
index 5cfd3d9..006ad36 100644 (file)
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
        if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
                /* Clear affinity */
-               cpus_setall(idesc->affinity);
+               cpumask_setall(idesc->affinity);
 #endif
                /* Clear the interrupt information */
                iosapic_intr_info[irq].dest = 0;
index a58f64c..226233a 100644 (file)
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
        if (irq < NR_IRQS) {
-               cpumask_copy(&irq_desc[irq].affinity,
+               cpumask_copy(irq_desc[irq].affinity,
                             cpumask_of(cpu_logical_id(hwid)));
                irq_redir[irq] = (char) (redir & 0xff);
        }
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
                if (desc->status == IRQ_PER_CPU)
                        continue;
 
-               if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+               if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
index 28d3d48..927ad02 100644 (file)
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        int irq = local_vector_to_irq(vector);
 
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
          * Perform normal interrupt style processing
          */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);
                        int irq = local_vector_to_irq(vector);
index 8903393..dcb6b7c 100644 (file)
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
        msg.data = data;
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
        msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
        dmar_msi_write(irq, &msg);
-       irq_desc[irq].affinity = *mask;
+       cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
index ca553b0..81e4289 100644 (file)
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
        msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = *cpu_mask;
+       cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
index abc62aa..3214ade 100644 (file)
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)                                         \
 do {                                                                   \
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {      \
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
        smtc_forward_irq(irq);                                          \
        irq_exit();                                                     \
        return;                                                         \
index 494a49a..87deb8f 100644 (file)
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
        }
-       irq_desc[irq].affinity = *cpumask;
+       cpumask_copy(irq_desc[irq].affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
 
 }
index b6cca01..5f5af7d 100644 (file)
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
         * and efficiency, we just pick the easiest one to find.
         */
 
-       target = first_cpu(irq_desc[irq].affinity);
+       target = cpumask_first(irq_desc[irq].affinity);
 
        /*
         * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
        struct clock_event_device *cd;
        void *arg_copy = pipi->arg;
        int type_copy = pipi->type;
+       int irq = MIPS_CPU_IRQ_BASE + 1;
+
        smtc_ipi_nq(&freeIPIq, pipi);
        switch (type_copy) {
        case SMTC_CLOCK_TICK:
                irq_enter();
-               kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+               kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);
                irq_exit();
index aabd727..5ba3188 100644 (file)
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-       cpumask_t tmask = *affinity;
+       cpumask_t tmask;
        int cpu = 0;
        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
         * be made to forward to an offline "CPU".
         */
 
+       cpumask_copy(&tmask, affinity);
        for_each_cpu(cpu, affinity) {
                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
                        cpu_clear(cpu, tmask);
        }
-       irq_desc[irq].affinity = tmask;
+       cpumask_copy(irq_desc[irq].affinity, &tmask);
 
        if (cpus_empty(tmask))
                /*
index f8b18af..0ecd5fe 100644 (file)
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
        int irq = SGI_BUSERR_IRQ;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        ip22_be_interrupt(irq);
        irq_exit();
 }
index 3dcb27e..c8f7d23 100644 (file)
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
        char c;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
        ArcRead(0, &c, 1, &cnt);
        ArcEnterInteractiveMode();
index dddfda8..3146916 100644 (file)
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_BCM1480_INT_MBOX_0_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 
index 5950a28..cad1400 100644 (file)
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_INT_MBOX_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
 
index 10811e9..2e370d8 100644 (file)
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
        int sum, cpu = smp_processor_id();
+       int irq = NMIIRQ;
        u8 wdt, tmp;
 
        wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        NMICR = NMICR_WDIF;
 
        nmi_count(cpu)++;
-       kstat_this_cpu.irqs[NMIIRQ]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        sum = irq_stat[cpu].__irq_count;
 
        if (last_irq_sums[cpu] == sum) {
index ac2c822..4948280 100644 (file)
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[irq].affinity);
                return -EINVAL;
        }
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
        if (cpu_check_affinity(irq, dest))
                return;
 
-       irq_desc[irq].affinity = *dest;
+       cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
        return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
index 23b8b5e..ad1e5ac 100644 (file)
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;
 
-               cpus_and(mask, irq_desc[irq].affinity, map);
+               cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
index 84e058f..80b5134 100644 (file)
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
        int server;
        /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t cpumask = irq_desc[virq].affinity;
+       cpumask_t cpumask;
        cpumask_t tmp = CPU_MASK_NONE;
 
+       cpumask_copy(&cpumask, irq_desc[virq].affinity);
        if (!distribute_irqs)
                return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
                       virq, cpu);
 
                /* Reset affinity to all cpus */
-               irq_desc[virq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[virq].affinity);
                desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
index 3e0d89d..0afd21f 100644 (file)
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
index cab8e02..4ac5c65 100644 (file)
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
index 2db3c22..db310aa 100644 (file)
@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
        irq_enter();
 
-       kstat_this_cpu.irqs[0]++;
+       kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644 (file)
index 0000000..82f613c
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
index e02a359..02b47a6 100644 (file)
@@ -3,6 +3,9 @@
 
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
                asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
                asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
        return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
        return oldbit;
 }
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr % BITS_PER_LONG)) &
                (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
index bae482d..f03b23e 100644 (file)
@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>
 
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)                   boot_cpu_physical_apicid
+#define safe_smp_processor_id()                        0
+#define stack_smp_processor_id()               0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
        struct cpu cpu;
 };
@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id                            0
+#endif
+
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644 (file)
index 0000000..26c6dad
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
index cf7954d..d4b5d73 100644 (file)
@@ -19,6 +19,9 @@ typedef struct {
 
 DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
 
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
 
index 7a1f44a..08ec793 100644 (file)
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];
 
-/*
- * MP-BIOS irq configuration table structures:
- */
-
 #define MP_MAX_IOAPIC_PIN 127
 
-struct mp_config_ioapic {
-       unsigned long mp_apicaddr;
-       unsigned int mp_apicid;
-       unsigned char mp_type;
-       unsigned char mp_apicver;
-       unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-       unsigned int mp_dstapic;
-       unsigned char mp_type;
-       unsigned char mp_irqtype;
-       unsigned short mp_irqflag;
-       unsigned char mp_srcbus;
-       unsigned char mp_srcbusirq;
-       unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
 
 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;
index f7ff650..a16a2ab 100644 (file)
 
 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
 
+#include <asm/apicnum.h>       /* need MAX_IO_APICS */
+
 #ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 #else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+
+# define NR_IRQS                                       \
+       ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?          \
+               (NR_VECTORS + (8 * NR_CPUS)) :          \
+               (NR_VECTORS + (32 * MAX_IO_APICS)))     \
+
 #endif
 
 #elif defined(CONFIG_X86_VOYAGER)
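
For scale, the new ternary with cpus4096-era constants (assuming NR_CPUS=4096, MAX_IO_APICS=128 and x86's NR_VECTORS=256): 8 * 4096 = 32768 exceeds 32 * 128 = 4096, so NR_IRQS = 256 + 32768 = 33024.
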
index 59568bc..4a7f96d 100644 (file)
 # endif
 #endif
 
-struct intel_mp_floating {
-       char mpf_signature[4];          /* "_MP_"                       */
-       unsigned int mpf_physptr;       /* Configuration table address  */
-       unsigned char mpf_length;       /* Our length (paragraphs)      */
-       unsigned char mpf_specification;/* Specification version        */
-       unsigned char mpf_checksum;     /* Checksum (makes sum 0)       */
-       unsigned char mpf_feature1;     /* Standard or configuration ?  */
-       unsigned char mpf_feature2;     /* Bit7 set for IMCR|PIC        */
-       unsigned char mpf_feature3;     /* Unused (0)                   */
-       unsigned char mpf_feature4;     /* Unused (0)                   */
-       unsigned char mpf_feature5;     /* Unused (0)                   */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+       char signature[4];              /* "_MP_"                       */
+       unsigned int physptr;           /* Configuration table address  */
+       unsigned char length;           /* Our length (paragraphs)      */
+       unsigned char specification;    /* Specification version        */
+       unsigned char checksum;         /* Checksum (makes sum 0)       */
+       unsigned char feature1;         /* Standard or configuration ?  */
+       unsigned char feature2;         /* Bit7 set for IMCR|PIC        */
+       unsigned char feature3;         /* Unused (0)                   */
+       unsigned char feature4;         /* Unused (0)                   */
+       unsigned char feature5;         /* Unused (0)                   */
 };
 
 #define MPC_SIGNATURE "PCMP"
index ba3e2ff..c26c6bf 100644 (file)
@@ -244,7 +244,8 @@ struct pv_mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
-       void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+       void (*flush_tlb_others)(const struct cpumask *cpus,
+                                struct mm_struct *mm,
                                 unsigned long va);
 
        /* Hooks for allocating and freeing a pagetable top-level */
@@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr)
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }
 
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+                                   struct mm_struct *mm,
                                    unsigned long va)
 {
-       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
index 19953df..a8cea7b 100644 (file)
 #endif
 #include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
+#include <asm/cpumask.h>
 
 extern int __cpuinit get_local_pda(int cpu);
 
@@ -167,8 +144,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -177,10 +152,6 @@ static inline int num_booting_cpus(void)
 {
        return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
@@ -205,10 +176,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()                smp_processor_id()
 
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)           boot_cpu_physical_apicid
-#define safe_smp_processor_id()                0
-#define stack_smp_processor_id()       0
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -251,11 +218,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id    0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
index 0e7bbb5..17feaa9 100644 (file)
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
                __flush_tlb();
 }
 
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long va)
 {
@@ -142,8 +142,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        flush_tlb_mm(vma->vm_mm);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-                            unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK    1
 #define TLBSTATE_LAZY  2
@@ -166,7 +166,7 @@ static inline void reset_lazy_tlbstate(void)
 #endif /* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +175,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
        flush_tlb_all();
 }
 
+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
index 50423c7..74e6393 100644 (file)
@@ -325,7 +325,8 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
        test_bit((cpu), (bau_local_cpumask).bits)
 
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
+extern int uv_flush_tlb_others(struct cpumask *,
+                              struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);
 
index d37593c..4cb5964 100644 (file)
@@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
        DECLARE_BITMAP(used, 256);
        bitmap_zero(used, 256);
        for (i = 0; i < nr_ioapics; i++) {
-               struct mp_config_ioapic *ia = &mp_ioapics[i];
-               __set_bit(ia->mp_apicid, used);
+               struct mpc_ioapic *ia = &mp_ioapics[i];
+               __set_bit(ia->apicid, used);
        }
        if (!test_bit(id, used))
                return id;
@@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
        idx = nr_ioapics;
 
-       mp_ioapics[idx].mp_type = MP_IOAPIC;
-       mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-       mp_ioapics[idx].mp_apicaddr = address;
+       mp_ioapics[idx].type = MP_IOAPIC;
+       mp_ioapics[idx].flags = MPC_APIC_USABLE;
+       mp_ioapics[idx].apicaddr = address;
 
        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-       mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+       mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-       mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+       mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-       mp_ioapics[idx].mp_apicver = 0;
+       mp_ioapics[idx].apicver = 0;
 #endif
        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
-       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
            io_apic_get_redir_entries(idx);
 
-       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-              "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-              mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+              "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+              mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
               mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
        nr_ioapics++;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+                                   struct mpc_intsrc *mp_irq)
 {
-       memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+       memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-                               struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+                               struct mpc_intsrc *m)
 {
-       return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+       return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
        int i;
 
@@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
        int ioapic;
        int pin;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
        /*
         * Convert 'gsi' to 'ioapic.pin'.
@@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;
 
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (trigger << 2) | polarity;
-       mp_irq.mp_srcbus = MP_ISA_BUS;
-       mp_irq.mp_srcbusirq = bus_irq;  /* IRQ */
-       mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-       mp_irq.mp_dstirq = pin; /* INTIN# */
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (trigger << 2) | polarity;
+       mp_irq.srcbus = MP_ISA_BUS;
+       mp_irq.srcbusirq = bus_irq;     /* IRQ */
+       mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+       mp_irq.dstirq = pin;    /* INTIN# */
 
        save_mp_irq(&mp_irq);
 }
@@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        int i;
        int ioapic;
        unsigned int dstapic;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
@@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;
-       dstapic = mp_ioapics[ioapic].mp_apicid;
+       dstapic = mp_ioapics[ioapic].apicid;
 
        /*
         * Use the default configuration for the IRQs 0-15.  Unless
@@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
                int idx;
 
                for (idx = 0; idx < mp_irq_entries; idx++) {
-                       struct mp_config_intsrc *irq = mp_irqs + idx;
+                       struct mpc_intsrc *irq = mp_irqs + idx;
 
                        /* Do we already have a mapping for this ISA IRQ? */
-                       if (irq->mp_srcbus == MP_ISA_BUS
-                           && irq->mp_srcbusirq == i)
+                       if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
                                break;
 
                        /* Do we already have a mapping for this IOAPIC pin */
-                       if (irq->mp_dstapic == dstapic &&
-                           irq->mp_dstirq == i)
+                       if (irq->dstapic == dstapic && irq->dstirq == i)
                                break;
                }
 
@@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
                        continue;       /* IRQ already used */
                }
 
-               mp_irq.mp_type = MP_INTSRC;
-               mp_irq.mp_irqflag = 0;  /* Conforming */
-               mp_irq.mp_srcbus = MP_ISA_BUS;
-               mp_irq.mp_dstapic = dstapic;
-               mp_irq.mp_irqtype = mp_INT;
-               mp_irq.mp_srcbusirq = i; /* Identity mapped */
-               mp_irq.mp_dstirq = i;
+               mp_irq.type = MP_INTSRC;
+               mp_irq.irqflag = 0;     /* Conforming */
+               mp_irq.srcbus = MP_ISA_BUS;
+               mp_irq.dstapic = dstapic;
+               mp_irq.irqtype = mp_INT;
+               mp_irq.srcbusirq = i; /* Identity mapped */
+               mp_irq.dstirq = i;
 
                save_mp_irq(&mp_irq);
        }
@@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                        u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
        int ioapic;
 
        if (!acpi_ioapic)
                return 0;
 
        /* print the entry should happen on mptable identically */
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-       mp_irq.mp_srcbus = number;
-       mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+       mp_irq.srcbus = number;
+       mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
        ioapic = mp_find_ioapic(gsi);
-       mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-       mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+       mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
 
        save_mp_irq(&mp_irq);
 #endif
index 0f830e4..38d6aab 100644 (file)
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
        unsigned int value;
 
+       /* APIC hasn't been mapped yet */
+       if (!apic_phys)
+               return;
+
        clear_local_APIC();
 
        /*
@@ -1126,6 +1130,11 @@ void __cpuinit setup_local_APIC(void)
        unsigned int value;
        int i, j;
 
+       if (disable_apic) {
+               disable_ioapic_setup();
+               return;
+       }
+
 #ifdef CONFIG_X86_32
        /* Pound the ESR really hard over the head with a big hammer - mbligh */
        if (lapic_is_integrated() && esr_disable) {
@@ -1566,11 +1575,11 @@ int apic_version[MAX_APICS];
 
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
        if (disable_apic) {
                pr_info("Apic disabled\n");
                return -1;
        }
+#ifdef CONFIG_X86_64
        if (!cpu_has_apic) {
                disable_apic = 1;
                pr_info("Apic disabled by BIOS\n");
index 83492b1..f002584 100644 (file)
@@ -21,6 +21,8 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
index 6f11e02..8f3c95c 100644 (file)
@@ -235,8 +235,6 @@ static u32 get_cur_val(const struct cpumask *mask)
                return 0;
        }
 
-       cpumask_copy(cmd.mask, mask);
-
        drv_read(&cmd);
 
        dprintk("get_cur_val = %u\n", cmd.val);
index 48533d7..58527a9 100644 (file)
@@ -132,7 +132,16 @@ struct _cpuid4_info {
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        unsigned long can_disable;
-       cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
+       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+       union _cpuid4_leaf_eax eax;
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+       unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
        if (index < 3)
                return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+                                  struct _cpuid4_info_regs *this_leaf)
 {
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
        return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+       struct _cpuid4_info_regs *leaf_regs =
+               (struct _cpuid4_info_regs *)this_leaf;
+
+       return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
        unsigned int            eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                 * parameters cpuid leaf to find the cache details
                 */
                for (i = 0; i < num_cache_leaves; i++) {
-                       struct _cpuid4_info this_leaf;
-
+                       struct _cpuid4_info_regs this_leaf;
                        int retval;
 
-                       retval = cpuid4_cache_lookup(i, &this_leaf);
+                       retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval >= 0) {
                                switch(this_leaf.eax.split.level) {
                                    case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
        if (num_threads_sharing == 1)
-               cpu_set(cpu, this_leaf->shared_cpu_map);
+               cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);
 
                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
-                               cpu_set(i, this_leaf->shared_cpu_map);
+                               cpumask_set_cpu(i,
+                                       to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(cpuid4_info, i))  {
-                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
-                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                                       sibling_leaf =
+                                               CPUID4_INFO_IDX(i, index);
+                                       cpumask_set_cpu(cpu, to_cpumask(
+                                               sibling_leaf->shared_cpu_map));
                                }
                        }
                }
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        int sibling;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+       for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+               cpumask_clear_cpu(cpu,
+                                 to_cpumask(sibling_leaf->shared_cpu_map));
        }
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
        int n = 0;
 
        if (len > 1) {
-               cpumask_t *mask = &this_leaf->shared_cpu_map;
+               const struct cpumask *mask;
 
+               mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        ssize_t ret = 0;
        int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
                    size_t count)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        unsigned int ret, index, val;
 
@@ -863,7 +888,7 @@ err_out:
        return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
-       cpu_set(cpu, cache_dev_map);
+       cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
        if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
-       if (!cpu_isset(cpu, cache_dev_map))
+       if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
-       cpu_clear(cpu, cache_dev_map);
+       cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
index 8ae8c4f..4772e91 100644 (file)
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
        struct kobject *kobj;
        struct threshold_block *blocks;
-       cpumask_t cpus;
+       cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = first_cpu(per_cpu(cpu_core_map, cpu));
+               i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               b->cpus = per_cpu(cpu_core_map, cpu);
+               cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
+       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+               kfree(b);
+               err = -ENOMEM;
+               goto out;
+       }
 
        b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
        if (!b->kobj)
                goto out_free;
 
 #ifndef CONFIG_SMP
-       b->cpus = CPU_MASK_ALL;
+       cpumask_setall(b->cpus);
 #else
-       b->cpus = per_cpu(cpu_core_map, cpu);
+       cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out_free;
 
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
+       free_cpumask_var(b->cpus);
        kfree(b);
 out:
        return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
        /* remove all sibling symlinks before unregistering */
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
+       free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
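
The threshold_bank changes above are the standard cpumask_var_t lifecycle. A self-contained sketch under the same assumptions (struct my_bank and its helpers are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_bank {
            cpumask_var_t cpus;
    };

    static int my_bank_init(struct my_bank *b, int cpu)
    {
            /* With CONFIG_CPUMASK_OFFSTACK=y this really allocates;
             * otherwise it is a no-op over an embedded bitmap. */
            if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_copy(b->cpus, cpumask_of(cpu));
            return 0;
    }

    static void my_bank_free(struct my_bank *b)
    {
            free_cpumask_var(b->cpus);  /* pairs with alloc_cpumask_var() */
    }
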
index c689d19..11b93ca 100644 (file)
@@ -24,7 +24,7 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
 
index 1c4a130..f796603 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/acpi.h>
@@ -82,11 +83,11 @@ static DEFINE_SPINLOCK(vector_lock);
 int nr_ioapic_registers[MAX_IO_APICS];
 
 /* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 int nr_ioapics;
 
 /* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* # of MP IRQ source entries */
 int mp_irq_entries;
@@ -356,7 +357,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 
        if (!cfg->move_in_progress) {
                /* it means that domain is not changed */
-               if (!cpumask_intersects(&desc->affinity, mask))
+               if (!cpumask_intersects(desc->affinity, mask))
                        cfg->move_desc_pending = 1;
        }
 }
@@ -386,7 +387,7 @@ struct io_apic {
 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
 {
        return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-               + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
+               + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
 }
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -579,9 +580,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
        if (assign_irq_vector(irq, cfg, mask))
                return BAD_APICID;
 
-       cpumask_and(&desc->affinity, cfg->domain, mask);
+       cpumask_and(desc->affinity, cfg->domain, mask);
        set_extra_move_desc(desc, mask);
-       return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+       return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }
 
 static void
@@ -944,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == type &&
-                   (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-                    mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-                   mp_irqs[i].mp_dstirq == pin)
+               if (mp_irqs[i].irqtype == type &&
+                   (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+                    mp_irqs[i].dstapic == MP_APIC_ALL) &&
+                   mp_irqs[i].dstirq == pin)
                        return i;
 
        return -1;
@@ -961,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
 
-                       return mp_irqs[i].mp_dstirq;
+                       return mp_irqs[i].dstirq;
        }
        return -1;
 }
@@ -977,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type)
        int i;
 
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
                        break;
        }
        if (i < mp_irq_entries) {
                int apic;
                for(apic = 0; apic < nr_ioapics; apic++) {
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
                                return apic;
                }
        }
@@ -1012,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
                return -1;
        }
        for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
+               int lbus = mp_irqs[i].srcbus;
 
                for (apic = 0; apic < nr_ioapics; apic++)
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-                           mp_irqs[i].mp_dstapic == MP_APIC_ALL)
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+                           mp_irqs[i].dstapic == MP_APIC_ALL)
                                break;
 
                if (!test_bit(lbus, mp_bus_not_pci) &&
-                   !mp_irqs[i].mp_irqtype &&
+                   !mp_irqs[i].irqtype &&
                    (bus == lbus) &&
-                   (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-                       int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
+                   (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+                       int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
 
                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;
 
-                       if (pin == (mp_irqs[i].mp_srcbusirq & 3))
+                       if (pin == (mp_irqs[i].srcbusirq & 3))
                                return irq;
                        /*
                         * Use the first all-but-pin matching entry as a
@@ -1071,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq)
  * EISA conforming in the MP table, that means its trigger type must
  * be read in from the ELCR */
 
-#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].srcbusirq))
 #define default_EISA_polarity(idx)     default_ISA_polarity(idx)
 
 /* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq)
 
 static int MPBIOS_polarity(int idx)
 {
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
        int polarity;
 
        /*
         * Determine IRQ line polarity (high active or low active):
         */
-       switch (mp_irqs[idx].mp_irqflag & 3)
+       switch (mp_irqs[idx].irqflag & 3)
        {
                case 0: /* conforms, ie. bus-type dependent polarity */
                        if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1131,13 @@ static int MPBIOS_polarity(int idx)
 
 static int MPBIOS_trigger(int idx)
 {
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
        int trigger;
 
        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
-       switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
+       switch ((mp_irqs[idx].irqflag>>2) & 3)
        {
                case 0: /* conforms, ie. bus-type dependent */
                        if (test_bit(bus, mp_bus_not_pci))
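Both hunks above decode the MP-table irqflag field, which packs polarity into bits 0-1 and trigger mode into bits 2-3 per the MultiProcessor specification. A decoding sketch, with hypothetical helper names:

	/* polarity: 0 = conforms to bus, 1 = active high, 3 = active low */
	static inline int mp_irq_polarity(unsigned short irqflag)
	{
		return irqflag & 3;
	}

	/* trigger: 0 = conforms to bus, 1 = edge, 3 = level */
	static inline int mp_irq_trigger(unsigned short irqflag)
	{
		return (irqflag >> 2) & 3;
	}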
@@ -1214,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
 static int pin_2_irq(int idx, int apic, int pin)
 {
        int irq, i;
-       int bus = mp_irqs[idx].mp_srcbus;
+       int bus = mp_irqs[idx].srcbus;
 
        /*
         * Debugging check, we are in big trouble if this message pops up!
         */
-       if (mp_irqs[idx].mp_dstirq != pin)
+       if (mp_irqs[idx].dstirq != pin)
                printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
 
        if (test_bit(bus, mp_bus_not_pci)) {
-               irq = mp_irqs[idx].mp_srcbusirq;
+               irq = mp_irqs[idx].srcbusirq;
        } else {
                /*
                 * PCI IRQs are mapped in order
@@ -1566,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
        apic_printk(APIC_VERBOSE,KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                    "IRQ %d Mode:%i Active:%i)\n",
-                   apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+                   apic, mp_ioapics[apic].apicid, pin, cfg->vector,
                    irq, trigger, polarity);
 
 
-       if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+       if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
                               dest, trigger, polarity, cfg->vector)) {
                printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
-                      mp_ioapics[apic].mp_apicid, pin);
+                      mp_ioapics[apic].apicid, pin);
                __clear_irq_vector(irq, cfg);
                return;
        }
@@ -1604,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void)
                                        notcon = 1;
                                        apic_printk(APIC_VERBOSE,
                                                KERN_DEBUG " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
+                                               mp_ioapics[apic].apicid, pin);
                                } else
                                        apic_printk(APIC_VERBOSE, " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
+                                               mp_ioapics[apic].apicid, pin);
                                continue;
                        }
                        if (notcon) {
@@ -1699,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-                      mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
+                      mp_ioapics[i].apicid, nr_ioapic_registers[i]);
 
        /*
         * We are a bit conservative about what we expect.  We have to
@@ -1719,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
        printk("\n");
-       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
+       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2121,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void)
                reg_00.raw = io_apic_read(apic, 0);
                spin_unlock_irqrestore(&ioapic_lock, flags);
 
-               old_id = mp_ioapics[apic].mp_apicid;
+               old_id = mp_ioapics[apic].apicid;
 
-               if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
+               if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
+                               apic, mp_ioapics[apic].apicid);
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                reg_00.bits.ID);
-                       mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
+                       mp_ioapics[apic].apicid = reg_00.bits.ID;
                }
 
                /*
@@ -2137,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 * 'stuck on smp_invalidate_needed IPI wait' messages.
                 */
                if (check_apicid_used(phys_id_present_map,
-                                       mp_ioapics[apic].mp_apicid)) {
+                                       mp_ioapics[apic].apicid)) {
                        printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
+                               apic, mp_ioapics[apic].apicid);
                        for (i = 0; i < get_physical_broadcast(); i++)
                                if (!physid_isset(i, phys_id_present_map))
                                        break;
@@ -2148,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
                        printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                                i);
                        physid_set(i, phys_id_present_map);
-                       mp_ioapics[apic].mp_apicid = i;
+                       mp_ioapics[apic].apicid = i;
                } else {
                        physid_mask_t tmp;
-                       tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
+                       tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
                        apic_printk(APIC_VERBOSE, "Setting %d in the "
                                        "phys_id_present_map\n",
-                                       mp_ioapics[apic].mp_apicid);
+                                       mp_ioapics[apic].apicid);
                        physids_or(phys_id_present_map, phys_id_present_map, tmp);
                }
 
@@ -2163,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 * We need to adjust the IRQ routing table
                 * if the ID changed.
                 */
-               if (old_id != mp_ioapics[apic].mp_apicid)
+               if (old_id != mp_ioapics[apic].apicid)
                        for (i = 0; i < mp_irq_entries; i++)
-                               if (mp_irqs[i].mp_dstapic == old_id)
-                                       mp_irqs[i].mp_dstapic
-                                               = mp_ioapics[apic].mp_apicid;
+                               if (mp_irqs[i].dstapic == old_id)
+                                       mp_irqs[i].dstapic
+                                               = mp_ioapics[apic].apicid;
 
                /*
                 * Read the right value from the MPC table and
@@ -2175,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
                 */
                apic_printk(APIC_VERBOSE, KERN_INFO
                        "...changing IO-APIC physical APIC ID to %d ...",
-                       mp_ioapics[apic].mp_apicid);
+                       mp_ioapics[apic].apicid);
 
-               reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
+               reg_00.bits.ID = mp_ioapics[apic].apicid;
                spin_lock_irqsave(&ioapic_lock, flags);
                io_apic_write(apic, 0, reg_00.raw);
                spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2188,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
                spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic, 0);
                spin_unlock_irqrestore(&ioapic_lock, flags);
-               if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
+               if (reg_00.bits.ID != mp_ioapics[apic].apicid)
                        printk("could not set ID!\n");
                else
                        apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2383,7 +2382,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);
 
-       cpumask_copy(&desc->affinity, mask);
+       cpumask_copy(desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2404,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
        }
 
        /* everything is clear. we have right of way */
-       migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+       migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
        ret = 0;
        desc->status &= ~IRQ_MOVE_PENDING;
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 
 unmask:
        unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2433,7 @@ static void ir_irq_migration(struct work_struct *work)
                                continue;
                        }
 
-                       desc->chip->set_affinity(irq, &desc->pending_mask);
+                       desc->chip->set_affinity(irq, desc->pending_mask);
                        spin_unlock_irqrestore(&desc->lock, flags);
                }
        }
@@ -2448,7 +2447,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 {
        if (desc->status & IRQ_LEVEL) {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, mask);
+               cpumask_copy(desc->pending_mask, mask);
                migrate_irq_remapped_level_desc(desc);
                return;
        }
@@ -2516,7 +2515,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
                /* domain has not changed, but affinity did */
                me = smp_processor_id();
-               if (cpu_isset(me, desc->affinity)) {
+               if (cpumask_test_cpu(me, desc->affinity)) {
                        *descp = desc = move_irq_desc(desc, me);
                        /* get the new one */
                        cfg = desc->chip_data;
@@ -3117,8 +3116,8 @@ static int ioapic_resume(struct sys_device *dev)
 
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(dev->id, 0);
-       if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
-               reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
+       if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
+               reg_00.bits.ID = mp_ioapics[dev->id].apicid;
                io_apic_write(dev->id, 0, reg_00.raw);
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3183,7 +3182,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
        irq = 0;
        spin_lock_irqsave(&vector_lock, flags);
-       for (new = irq_want; new < NR_IRQS; new++) {
+       for (new = irq_want; new < nr_irqs; new++) {
                if (platform_legacy_irq(new))
                        continue;
 
@@ -3258,6 +3257,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
        int err;
        unsigned dest;
 
+       if (disable_apic)
+               return -ENXIO;
+
        cfg = irq_cfg(irq);
        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
        if (err)
@@ -3726,6 +3728,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
        struct irq_cfg *cfg;
        int err;
 
+       if (disable_apic)
+               return -ENXIO;
+
        cfg = irq_cfg(irq);
        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
        if (!err) {
@@ -3850,6 +3855,22 @@ void __init probe_nr_irqs_gsi(void)
                nr_irqs_gsi = nr;
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+       int nr;
+
+       nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
+               (NR_VECTORS + (8 * nr_cpu_ids)) :
+               (NR_VECTORS + (32 * nr_ioapics)));
+
+       if (nr < nr_irqs && nr > nr_irqs_gsi)
+               nr_irqs = nr;
+
+       return 0;
+}
+#endif
+
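A worked example for the sizing heuristic in arch_probe_nr_irqs(): with nr_cpu_ids = 16 and nr_ioapics = 2, the per-cpu term 8 * 16 = 128 outweighs the per-IO-APIC term 32 * 2 = 64, so nr = NR_VECTORS + 128 = 384 (NR_VECTORS being 256 on x86), and nr_irqs is lowered to 384 only if that stays below the current nr_irqs and above nr_irqs_gsi. Equivalently:

	/* illustrative numbers, not committed code */
	nr = NR_VECTORS + max(8 * nr_cpu_ids, 32 * nr_ioapics);
	/* 16 cpus, 2 io-apics: 256 + max(128, 64) = 384 */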
 /* --------------------------------------------------------------------------
                           ACPI-based IOAPIC Configuration
    -------------------------------------------------------------------------- */
@@ -3984,8 +4005,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
                return -1;
 
        for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == mp_INT &&
-                   mp_irqs[i].mp_srcbusirq == bus_irq)
+               if (mp_irqs[i].irqtype == mp_INT &&
+                   mp_irqs[i].srcbusirq == bus_irq)
                        break;
        if (i >= mp_irq_entries)
                return -1;
@@ -4039,7 +4060,7 @@ void __init setup_ioapic_dest(void)
                         */
                        if (desc->status &
                            (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                               mask = &desc->affinity;
+                               mask = desc->affinity;
                        else
                                mask = TARGET_CPUS;
 
@@ -4100,7 +4121,7 @@ void __init ioapic_init_mappings(void)
        ioapic_res = ioapic_setup_resources();
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
-                       ioapic_phys = mp_ioapics[i].mp_apicaddr;
+                       ioapic_phys = mp_ioapics[i].apicaddr;
 #ifdef CONFIG_X86_32
                        if (!ioapic_phys) {
                                printk(KERN_ERR
index 74b9ff7..e0f29be 100644 (file)
@@ -248,7 +248,7 @@ void fixup_irqs(void)
                if (irq == 2)
                        continue;
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
index 63c88e6..0b21cb1 100644 (file)
@@ -100,7 +100,7 @@ void fixup_irqs(void)
                /* interrupt's are disabled at this point */
                spin_lock(&desc->lock);
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
index b7f4c92..5e9f4fc 100644 (file)
@@ -87,9 +87,9 @@
 #include <linux/cpu.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/uaccess.h>
 
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
 
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
        return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
 }
 
-static inline int 
+static inline int
 update_match_revision(struct microcode_header_intel *mc_header,        int rev)
 {
        return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
                return ret;
        }
 
-       ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
-                       &get_ucode_fw);
+       ret = generic_load_microcode(cpu, (void *)firmware->data,
+                                    firmware->size, &get_ucode_fw);
 
        release_firmware(firmware);
 
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
        /* We should bind the task to the CPU */
        BUG_ON(cpu != raw_smp_processor_id());
 
-       return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
+       return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
 
 static void microcode_fini_cpu(int cpu)
index 3db0a54..0edd819 100644 (file)
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 /* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                *para = NULL;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 
+       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
                if (!strcmp(".text", secstrings + s->sh_name))
                        text = s;
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
index 6ba8783..c23880b 100644 (file)
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-#define DEBUGP(fmt...) 
+#define DEBUGP(fmt...)
 
 #ifndef CONFIG_UML
 void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        Elf64_Sym *sym;
        void *loc;
-       u64 val; 
+       u64 val;
 
        DEBUGP("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);
 
-               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
-                      (int)ELF64_R_TYPE(rel[i].r_info), 
-                      sym->st_value, rel[i].r_addend, (u64)loc);
+               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
+                       (int)ELF64_R_TYPE(rel[i].r_info),
+                       sym->st_value, rel[i].r_addend, (u64)loc);
 
-               val = sym->st_value + rel[i].r_addend; 
+               val = sym->st_value + rel[i].r_addend;
 
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
                        break;
-               case R_X86_64_PC32: 
+               case R_X86_64_PC32:
                        val -= (u64)loc;
                        *(u32 *)loc = val;
 #if 0
                        if ((s64)val != *(s32 *)loc)
-                               goto overflow; 
+                               goto overflow;
 #endif
                        break;
                default:
-                       printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+                       printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
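For reference, the switch above implements the standard x86-64 ELF relocation formulas, where S is the symbol value, A the addend, and P the address of the location being patched:

	/*
	 *   R_X86_64_NONE : nothing to do
	 *   R_X86_64_64   : *(u64 *)loc = S + A
	 *   R_X86_64_32   : *(u32 *)loc = S + A   (checked for overflow)
	 *   R_X86_64_32S  : *(s32 *)loc = S + A   (checked, sign-extended)
	 *   R_X86_64_PC32 : *(u32 *)loc = S + A - P
	 */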
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        return 0;
 
 overflow:
-       printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 
+       printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), val);
        printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
               me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
                   unsigned int relsec,
                   struct module *me)
 {
-       printk("non add relocation not supported\n");
+       printk(KERN_ERR "non add relocation not supported\n");
        return -ENOSYS;
-} 
+}
 
 int module_finalize(const Elf_Ehdr *hdr,
-                    const Elf_Shdr *sechdrs,
-                    struct module *me)
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
 {
        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
                *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
index a649a4c..fa6bb26 100644 (file)
@@ -144,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
        if (bad_ioapic(m->apicaddr))
                return;
 
-       mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
-       mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
-       mp_ioapics[nr_ioapics].mp_type = m->type;
-       mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
-       mp_ioapics[nr_ioapics].mp_flags = m->flags;
+       mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+       mp_ioapics[nr_ioapics].apicid = m->apicid;
+       mp_ioapics[nr_ioapics].type = m->type;
+       mp_ioapics[nr_ioapics].apicver = m->apicver;
+       mp_ioapics[nr_ioapics].flags = m->flags;
        nr_ioapics++;
 }
 
@@ -160,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
                m->srcbusirq, m->dstapic, m->dstirq);
 }
 
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
 {
        apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-               mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
-               (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
-               mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+               mp_irq->irqtype, mp_irq->irqflag & 3,
+               (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+               mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
 static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+                                   struct mpc_intsrc *mp_irq)
 {
-       mp_irq->mp_dstapic = m->dstapic;
-       mp_irq->mp_type = m->type;
-       mp_irq->mp_irqtype = m->irqtype;
-       mp_irq->mp_irqflag = m->irqflag;
-       mp_irq->mp_srcbus = m->srcbus;
-       mp_irq->mp_srcbusirq = m->srcbusirq;
-       mp_irq->mp_dstirq = m->dstirq;
+       mp_irq->dstapic = m->dstapic;
+       mp_irq->type = m->type;
+       mp_irq->irqtype = m->irqtype;
+       mp_irq->irqflag = m->irqflag;
+       mp_irq->srcbus = m->srcbus;
+       mp_irq->srcbusirq = m->srcbusirq;
+       mp_irq->dstirq = m->dstirq;
 }
 
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       m->dstapic = mp_irq->mp_dstapic;
-       m->type = mp_irq->mp_type;
-       m->irqtype = mp_irq->mp_irqtype;
-       m->irqflag = mp_irq->mp_irqflag;
-       m->srcbus = mp_irq->mp_srcbus;
-       m->srcbusirq = mp_irq->mp_srcbusirq;
-       m->dstirq = mp_irq->mp_dstirq;
+       m->dstapic = mp_irq->dstapic;
+       m->type = mp_irq->type;
+       m->irqtype = mp_irq->irqtype;
+       m->irqflag = mp_irq->irqflag;
+       m->srcbus = mp_irq->srcbus;
+       m->srcbusirq = mp_irq->srcbusirq;
+       m->dstirq = mp_irq->dstirq;
 }
 
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       if (mp_irq->mp_dstapic != m->dstapic)
+       if (mp_irq->dstapic != m->dstapic)
                return 1;
-       if (mp_irq->mp_type != m->type)
+       if (mp_irq->type != m->type)
                return 2;
-       if (mp_irq->mp_irqtype != m->irqtype)
+       if (mp_irq->irqtype != m->irqtype)
                return 3;
-       if (mp_irq->mp_irqflag != m->irqflag)
+       if (mp_irq->irqflag != m->irqflag)
                return 4;
-       if (mp_irq->mp_srcbus != m->srcbus)
+       if (mp_irq->srcbus != m->srcbus)
                return 5;
-       if (mp_irq->mp_srcbusirq != m->srcbusirq)
+       if (mp_irq->srcbusirq != m->srcbusirq)
                return 6;
-       if (mp_irq->mp_dstirq != m->dstirq)
+       if (mp_irq->dstirq != m->dstirq)
                return 7;
 
        return 0;
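mp_irq_mpc_intsrc_cmp() returns 0 on a full match and otherwise a 1-based code naming the first field that differed, so a caller can both detect duplicates and report what mismatched. A hypothetical caller, for illustration only:

	static int __init intsrc_is_duplicate(struct mpc_intsrc *mp_irq,
					      struct mpc_intsrc *m)
	{
		int field = mp_irq_mpc_intsrc_cmp(mp_irq, m);

		if (field)
			pr_debug("mpc_intsrc mismatch in field %d\n", field);
		return field == 0;
	}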
@@ -417,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
        intsrc.type = MP_INTSRC;
        intsrc.irqflag = 0;     /* conforming */
        intsrc.srcbus = 0;
-       intsrc.dstapic = mp_ioapics[0].mp_apicid;
+       intsrc.dstapic = mp_ioapics[0].apicid;
 
        intsrc.irqtype = mp_INT;
 
@@ -570,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        }
 }
 
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
 
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
 static void __init __get_smp_config(unsigned int early)
 {
-       struct intel_mp_floating *mpf = mpf_found;
+       struct mpf_intel *mpf = mpf_found;
 
        if (!mpf)
                return;
@@ -598,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
        }
 
        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
-              mpf->mpf_specification);
+              mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-       if (mpf->mpf_feature2 & (1 << 7)) {
+       if (mpf->feature2 & (1 << 7)) {
                printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
                pic_mode = 1;
        } else {
@@ -611,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
        /*
         * Now see if we need to read further.
         */
-       if (mpf->mpf_feature1 != 0) {
+       if (mpf->feature1 != 0) {
                if (early) {
                        /*
                         * local APIC has default address
@@ -621,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
                }
 
                printk(KERN_INFO "Default MP configuration #%d\n",
-                      mpf->mpf_feature1);
-               construct_default_ISA_mptable(mpf->mpf_feature1);
+                      mpf->feature1);
+               construct_default_ISA_mptable(mpf->feature1);
 
-       } else if (mpf->mpf_physptr) {
+       } else if (mpf->physptr) {
 
                /*
                 * Read the physical hardware table.  Anything here will
                 * override the defaults.
                 */
-               if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+               if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 0;
 #endif
@@ -688,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                  unsigned reserve)
 {
        unsigned int *bp = phys_to_virt(base);
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
 
        apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
                        bp, length);
        BUILD_BUG_ON(sizeof(*mpf) != 16);
 
        while (length > 0) {
-               mpf = (struct intel_mp_floating *)bp;
+               mpf = (struct mpf_intel *)bp;
                if ((*bp == SMP_MAGIC_IDENT) &&
-                   (mpf->mpf_length == 1) &&
+                   (mpf->length == 1) &&
                    !mpf_checksum((unsigned char *)bp, 16) &&
-                   ((mpf->mpf_specification == 1)
-                    || (mpf->mpf_specification == 4))) {
+                   ((mpf->specification == 1)
+                    || (mpf->specification == 4))) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 1;
 #endif
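The scan accepts a candidate paragraph only when the "_MP_" signature matches, the length field is exactly one 16-byte unit, all 16 bytes checksum to zero, and the spec revision is 1 or 4. For orientation, a sketch of the layout behind the renamed struct mpf_intel (field widths per the MP spec; illustrative, not the kernel header):

	struct mpf_intel_sketch {
		char signature[4];		/* "_MP_" (SMP_MAGIC_IDENT)         */
		unsigned int physptr;		/* phys addr of the MP config table */
		unsigned char length;		/* size in 16-byte units, must be 1 */
		unsigned char specification;	/* MP spec revision, 1 or 4         */
		unsigned char checksum;		/* all 16 bytes sum to zero         */
		unsigned char feature1;		/* default config type, 0 = table   */
		unsigned char feature2;		/* bit 7 = IMCR/PIC mode            */
		unsigned char feature3, feature4, feature5;	/* reserved         */
	};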
@@ -713,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                return 1;
                        reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
                                        BOOTMEM_DEFAULT);
-                       if (mpf->mpf_physptr) {
+                       if (mpf->physptr) {
                                unsigned long size = PAGE_SIZE;
 #ifdef CONFIG_X86_32
                                /*
@@ -722,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                 * the bottom is mapped now.
                                 * PC-9800's MPC table places on the very last
                                 * of physical memory; so that simply reserving
-                                * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+                                * PAGE_SIZE from mpf->physptr yields BUG()
                                 * in reserve_bootmem.
                                 */
                                unsigned long end = max_low_pfn * PAGE_SIZE;
-                               if (mpf->mpf_physptr + size > end)
-                                       size = end - mpf->mpf_physptr;
+                               if (mpf->physptr + size > end)
+                                       size = end - mpf->physptr;
 #endif
-                               reserve_bootmem_generic(mpf->mpf_physptr, size,
+                               reserve_bootmem_generic(mpf->physptr, size,
                                                BOOTMEM_DEFAULT);
                        }
 
@@ -809,15 +809,15 @@ static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
        /* not legacy */
 
        for (i = 0; i < mp_irq_entries; i++) {
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
-               if (mp_irqs[i].mp_srcbus != m->srcbus)
+               if (mp_irqs[i].srcbus != m->srcbus)
                        continue;
-               if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+               if (mp_irqs[i].srcbusirq != m->srcbusirq)
                        continue;
                if (irq_used[i]) {
                        /* already claimed */
@@ -922,10 +922,10 @@ static int  __init replace_intsrc_all(struct mpc_table *mpc,
                if (irq_used[i])
                        continue;
 
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
                if (nr_m_spare > 0) {
@@ -1001,7 +1001,7 @@ static int __init update_mp_table(void)
 {
        char str[16];
        char oem[10];
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
        struct mpc_table *mpc, *mpc_new;
 
        if (!enable_update_mptable)
@@ -1014,19 +1014,19 @@ static int __init update_mp_table(void)
        /*
         * Now see if we need to go further.
         */
-       if (mpf->mpf_feature1 != 0)
+       if (mpf->feature1 != 0)
                return 0;
 
-       if (!mpf->mpf_physptr)
+       if (!mpf->physptr)
                return 0;
 
-       mpc = phys_to_virt(mpf->mpf_physptr);
+       mpc = phys_to_virt(mpf->physptr);
 
        if (!smp_check_mpc(mpc, oem, str))
                return 0;
 
        printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
-       printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+       printk(KERN_INFO "physptr: %x\n", mpf->physptr);
 
        if (mpc_new_phys && mpc->length > mpc_new_length) {
                mpc_new_phys = 0;
@@ -1047,23 +1047,23 @@ static int __init update_mp_table(void)
                }
                printk(KERN_INFO "use in-positon replacing\n");
        } else {
-               mpf->mpf_physptr = mpc_new_phys;
+               mpf->physptr = mpc_new_phys;
                mpc_new = phys_to_virt(mpc_new_phys);
                memcpy(mpc_new, mpc, mpc->length);
                mpc = mpc_new;
                /* check if we can modify that */
-               if (mpc_new_phys - mpf->mpf_physptr) {
-                       struct intel_mp_floating *mpf_new;
+               if (mpc_new_phys - mpf->physptr) {
+                       struct mpf_intel *mpf_new;
                        /* steal 16 bytes from [0, 1k) */
                        printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
                        mpf_new = phys_to_virt(0x400 - 16);
                        memcpy(mpf_new, mpf, 16);
                        mpf = mpf_new;
-                       mpf->mpf_physptr = mpc_new_phys;
+                       mpf->physptr = mpc_new_phys;
                }
-               mpf->mpf_checksum = 0;
-               mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
-               printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+               mpf->checksum = 0;
+               mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+               printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
        }
 
        /*
index 7262666..3cf3413 100644 (file)
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 static struct class *msr_class;
index 2b46eb4..f8536fe 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/reboot.h>
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/dmi.h>
index ae0d804..f41c448 100644 (file)
@@ -89,7 +89,7 @@
 
 #include <asm/system.h>
 #include <asm/vsyscall.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/dma.h>
 #include <asm/iommu.h>
index 55c4607..bf63de7 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/cpumask.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
index bb1a3b1..1a712da 100644 (file)
@@ -53,7 +53,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
@@ -1125,6 +1124,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation."
                                "(tell your hw vendor)\n");
                smpboot_clear_io_apic();
+               disable_ioapic_setup();
                return -1;
        }
 
index ce50546..ec53818 100644 (file)
@@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
  *     Optimizations Manfred Spraul <manfred@colorfullife.com>
  */
 
-static cpumask_t flush_cpumask;
+static cpumask_var_t flush_cpumask;
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
@@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 
        cpu = get_cpu();
 
-       if (!cpu_isset(cpu, flush_cpumask))
+       if (!cpumask_test_cpu(cpu, flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
@@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
-       cpu_clear(cpu, flush_cpumask);
+       cpumask_clear_cpu(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
 out:
        put_cpu_no_resched();
        inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va)
 {
-       cpumask_t cpumask = *cpumaskp;
-
        /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
         * - mask must exist :)
         */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+       BUG_ON(cpumask_empty(cpumask));
        BUG_ON(!mm);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (unlikely(cpus_empty(cpumask)))
-               return;
-#endif
-
        /*
         * i'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
@@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
         */
        spin_lock(&tlbstate_lock);
 
+       cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
+#ifdef CONFIG_HOTPLUG_CPU
+       /* If a CPU which we ran on has gone down, OK. */
+       cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
+       if (unlikely(cpumask_empty(flush_cpumask))) {
+               spin_unlock(&tlbstate_lock);
+               return;
+       }
+#endif
        flush_mm = mm;
        flush_va = va;
-       cpus_or(flush_cpumask, cpumask, flush_cpumask);
 
        /*
         * Make the above memory operations globally visible before
@@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
         * We have to send the IPI only to
         * CPUs affected.
         */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
+       send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
 
-       while (!cpus_empty(flush_cpumask))
+       while (!cpumask_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                cpu_relax();
 
@@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 void flush_tlb_current_task(void)
 {
        struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm)
                else
                        leave_mm(smp_processor_id());
        }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
        preempt_enable();
 }
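The cpumask_any_but() conversions in these hunks avoid copying an entire cpumask_t onto the stack just to clear one bit: the helper returns some cpu in the mask other than the one given, or a value >= nr_cpu_ids when there is none, so the test reads "does any other cpu hold this mm?". Schematically:

	/* semantics sketch of the idiom above */
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		/* at least one remote cpu may cache this mm in its TLB */
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);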
@@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
        struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
                        leave_mm(smp_processor_id());
        }
 
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
-
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, va);
        preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_page);
@@ -254,3 +239,9 @@ void reset_lazy_tlbstate(void)
        per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 }
 
+static int init_flush_cpumask(void)
+{
+       alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
+       return 0;
+}
+early_initcall(init_flush_cpumask);
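One caveat in the new initcall: under CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var() really allocates and can fail, yet its return value is ignored here (under OFFSTACK=n it is a no-op that always succeeds). A more defensive variant, sketched against the same flush_cpumask variable:

	static int init_flush_cpumask_checked(void)
	{
		if (!alloc_cpumask_var(&flush_cpumask, GFP_KERNEL))
			return -ENOMEM;		/* propagate the failure */
		return 0;
	}
	early_initcall(init_flush_cpumask_checked);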
index f8be6f1..7f4141d 100644 (file)
 
 union smp_flush_state {
        struct {
-               cpumask_t flush_cpumask;
                struct mm_struct *flush_mm;
                unsigned long flush_va;
                spinlock_t tlbstate_lock;
+               DECLARE_BITMAP(flush_cpumask, NR_CPUS);
        };
        char pad[SMP_CACHE_BYTES];
 } ____cacheline_aligned;
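The union layout above is a false-sharing guard: overlaying the state with char pad[SMP_CACHE_BYTES] rounds each per-sender slot up to a full cache line, so the NUM_INVALIDATE_TLB_VECTORS senders never write to the same line. The idiom in isolation, assuming a 64-byte line:

	union padded_slot {
		struct {
			spinlock_t lock;
			unsigned long state;
		};
		char pad[64];		/* assumed cache-line size */
	} ____cacheline_aligned;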
@@ -131,7 +131,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
        f = &per_cpu(flush_state, sender);
 
-       if (!cpu_isset(cpu, f->flush_cpumask))
+       if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
@@ -153,19 +153,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
        }
 out:
        ack_APIC_irq();
-       cpu_clear(cpu, f->flush_cpumask);
+       cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
        inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+                                struct mm_struct *mm, unsigned long va)
 {
        int sender;
        union smp_flush_state *f;
-       cpumask_t cpumask = *cpumaskp;
-
-       if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-               return;
 
        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -180,7 +176,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 
        f->flush_mm = mm;
        f->flush_va = va;
-       cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+       cpumask_andnot(to_cpumask(f->flush_cpumask),
+                      cpumask, cpumask_of(smp_processor_id()));
 
        /*
         * Make the above memory operations globally visible before
@@ -191,9 +188,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
         * We have to send the IPI only to
         * CPUs affected.
         */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+       send_IPI_mask(to_cpumask(f->flush_cpumask),
+                     INVALIDATE_TLB_VECTOR_START + sender);
 
-       while (!cpus_empty(f->flush_cpumask))
+       while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
                cpu_relax();
 
        f->flush_mm = NULL;
@@ -201,6 +199,25 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
        spin_unlock(&f->tlbstate_lock);
 }
 
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va)
+{
+       if (is_uv_system()) {
+               /* FIXME: could be a percpu_alloc'd thing */
+               static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+               struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask);
+
+               cpumask_andnot(after_uv_flush, cpumask,
+                              cpumask_of(smp_processor_id()));
+               if (!uv_flush_tlb_others(after_uv_flush, mm, va))
+                       flush_tlb_others_ipi(after_uv_flush, mm, va);
+
+       put_cpu_var(flush_tlb_mask);
+               return;
+       }
+       flush_tlb_others_ipi(cpumask, mm, va);
+}
+
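The UV branch relies on the get_cpu_var()/put_cpu_var() pairing: get_cpu_var() disables preemption and yields this cpu's instance of the per-cpu variable, and put_cpu_var() re-enables preemption and should name the same variable. The pattern in isolation:

	static DEFINE_PER_CPU(cpumask_t, scratch_mask);

	static void percpu_scratch_sketch(const struct cpumask *src)
	{
		struct cpumask *m = &get_cpu_var(scratch_mask); /* preempt off */

		cpumask_copy(m, src);
		/* ... safe to use m: we cannot migrate here ... */
		put_cpu_var(scratch_mask);			/* preempt on */
	}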
 static int __cpuinit init_smp_flush(void)
 {
        int i;
@@ -215,25 +232,18 @@ core_initcall(init_smp_flush);
 void flush_tlb_current_task(void)
 {
        struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       cpumask_t cpu_mask;
-
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -241,8 +251,8 @@ void flush_tlb_mm(struct mm_struct *mm)
                else
                        leave_mm(smp_processor_id());
        }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
        preempt_enable();
 }
@@ -250,11 +260,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
        struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
 
        preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
 
        if (current->active_mm == mm) {
                if (current->mm)
@@ -263,8 +270,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
                        leave_mm(smp_processor_id());
        }
 
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
        preempt_enable();
 }
index f885023..690dcf1 100644 (file)
@@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * The cpumaskp mask contains the cpus the broadcast was sent to.
  *
  * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns 0 if some remote flushing remains to be done. The mask will have
+ * some bits still set.
  */
 int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-                          cpumask_t *cpumaskp)
+                          struct cpumask *cpumaskp)
 {
        int completion_status = 0;
        int right_shift;
@@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
         * Success, so clear the remote cpu's from the mask so we don't
         * use the IPI method of shootdown on them.
         */
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, cpumaskp) {
                blade = uv_cpu_to_blade_id(bit);
                if (blade == this_blade)
                        continue;
-               cpu_clear(bit, *cpumaskp);
+               cpumask_clear_cpu(bit, cpumaskp);
        }
-       if (!cpus_empty(*cpumaskp))
+       if (!cpumask_empty(cpumaskp))
                return 0;
        return 1;
 }
@@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 1 if all remote flushing was done.
  * Returns 0 if some remote flushing remains to be done.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
+int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
                        unsigned long va)
 {
        int i;
@@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
        bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
        i = 0;
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, cpumaskp) {
                blade = uv_cpu_to_blade_id(bit);
                BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
                if (blade == this_blade) {
index a580b95..0ade625 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/cpu.h>
 
 void __init pre_intr_init_hook(void)
 {
index 88f1b10..4a6989e 100644 (file)
@@ -49,7 +49,6 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
index 160c42d..3be3990 100644 (file)
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                                              req_type & _PAGE_CACHE_MASK);
        }
 
-       is_range_ram = pagerange_is_ram(start, end);
-       if (is_range_ram == 1)
-               return reserve_ram_pages_type(start, end, req_type, new_type);
-       else if (is_range_ram < 0)
-               return -EINVAL;
+       /*
+        * For legacy reasons, some parts of the physical address range in the
+        * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+        * the e820 tables).  So we always track the memory attributes of this
+        * legacy 1MB region using the linear memtype_list.
+        */
+       if (end >= ISA_END_ADDRESS) {
+               is_range_ram = pagerange_is_ram(start, end);
+               if (is_range_ram == 1)
+                       return reserve_ram_pages_type(start, end, req_type,
+                                                     new_type);
+               else if (is_range_ram < 0)
+                       return -EINVAL;
+       }
 
        new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
        if (is_ISA_range(start, end - 1))
                return 0;
 
-       is_range_ram = pagerange_is_ram(start, end);
-       if (is_range_ram == 1)
-               return free_ram_pages_type(start, end);
-       else if (is_range_ram < 0)
-               return -EINVAL;
+       /*
+        * For legacy reasons, some parts of the physical address range in the
+        * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+        * the e820 tables).  So we always track the memory attributes of this
+        * legacy 1MB region using the linear memtype_list.
+        */
+       if (end >= ISA_END_ADDRESS) {
+               is_range_ram = pagerange_is_ram(start, end);
+               if (is_range_ram == 1)
+                       return free_ram_pages_type(start, end);
+               else if (is_range_ram < 0)
+                       return -EINVAL;
+       }
 
        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
index bea2152..965539e 100644 (file)
@@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
        preempt_enable();
 }
 
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-                                unsigned long va)
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+                                struct mm_struct *mm, unsigned long va)
 {
        struct {
                struct mmuext_op op;
-               cpumask_t mask;
+               DECLARE_BITMAP(mask, NR_CPUS);
        } *args;
-       cpumask_t cpumask = *cpus;
        struct multicall_space mcs;
 
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+       BUG_ON(cpumask_empty(cpus));
        BUG_ON(!mm);
 
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (cpus_empty(cpumask))
-               return;
-
        mcs = xen_mc_entry(sizeof(*args));
        args = mcs.args;
-       args->mask = cpumask;
-       args->op.arg2.vcpumask = &args->mask;
+       args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+       /* Remove us, and any offline CPUS. */
+       cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+       if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+               goto issue;
 
        if (va == TLB_FLUSH_ALL) {
                args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
 
        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
+issue:
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
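The Xen conversion swaps the embedded cpumask_t for DECLARE_BITMAP() plus to_cpumask(): the bitmap provides fixed NR_CPUS-bit storage inside the multicall argument block, and to_cpumask() lets the generic cpumask API operate on it in place. In isolation:

	struct hypercall_args_sketch {
		DECLARE_BITMAP(mask, NR_CPUS);	/* raw bit storage */
	};

	static void fill_mask_sketch(struct hypercall_args_sketch *args)
	{
		struct cpumask *m = to_cpumask(args->mask);

		cpumask_copy(m, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), m);
	}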
 
index 719ee5c..5b257a5 100644 (file)
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
index a778fb5..bf6b132 100644 (file)
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name)           \
+#define define_one_ro_named(_name, _func)                              \
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name)                           \
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name)                              \
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev,            \
        return sprintf(buf, "%d\n", topology_##name(cpu));      \
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev,                    \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(0, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(0, topology_##name(cpu), buf);               \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  char *buf)                            \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(1, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(1, topology_##name(cpu), buf);               \
 }
 
 #else
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
 static ssize_t show_##name(struct sys_device *dev,                     \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(0, &mask, buf);                              \
+       return show_cpumap(0, topology_##name(dev->id), buf);           \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  struct sysdev_attribute *attr,        \
                                  char *buf)                            \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(1, &mask, buf);                              \
+       return show_cpumap(1, topology_##name(dev->id), buf);           \
 }
 #endif
 
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
        &attr_physical_package_id.attr,
index 777fba4..3009e01 100644 (file)
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-       cpumask_t old_mask;
+       cpumask_var_t old_mask;
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        }
 
        /* SMI requires CPU 0 */
-       old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+       if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_copy(old_mask, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current, cpumask_of(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        );
 
 out:
-       set_cpus_allowed_ptr(current, &old_mask);
+       set_cpus_allowed_ptr(current, old_mask);
+       free_cpumask_var(old_mask);
        return ret;
 }
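The dcdbas change above is the general recipe for moving a saved affinity mask off the stack: allocate a cpumask_var_t, copy the old mask, pin the task, do the work, restore, free. A self-contained sketch of that save/restore pattern (run_on_cpu0 and fn are invented for illustration):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static int run_on_cpu0(int (*fn)(void))
{
        cpumask_var_t old_mask;
        int ret;

        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                return -ENOMEM;

        /* save the current affinity, then pin ourselves to CPU 0 */
        cpumask_copy(old_mask, &current->cpus_allowed);
        set_cpus_allowed_ptr(current, cpumask_of(0));

        ret = (smp_processor_id() == 0) ? fn() : -EBUSY;

        /* restore the saved affinity and release the mask */
        set_cpus_allowed_ptr(current, old_mask);
        free_cpumask_var(old_mask);
        return ret;
}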
 
index 89218f7..6576170 100644 (file)
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+       set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
index 7673fd9..101c00a 100644 (file)
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-       cpumask_t core_mask;
+       cpumask_var_t core_mask;
        int count;
        int cpu;
 
-       cpus_clear(core_mask);
+       if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+               printk(KERN_WARNING
+                      "efx.c: allocation failure, irq balancing hobbled\n");
+               return 1;
+       }
+
+       cpumask_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
-               if (!cpu_isset(cpu, core_mask)) {
+               if (!cpumask_test_cpu(cpu, core_mask)) {
                        ++count;
-                       cpus_or(core_mask, core_mask,
-                               topology_core_siblings(cpu));
+                       cpumask_or(core_mask, core_mask,
+                                  topology_core_cpumask(cpu));
                }
        }
 
+       free_cpumask_var(core_mask);
        return count;
 }
 
index 9da5a4b..c3ea5fa 100644 (file)
@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
        int i;
 
-       cpu_set(cpu, marked_cpus);
+       cpumask_set_cpu(cpu, marked_cpus);
 
        for_each_online_cpu(i) {
-               if (!cpu_isset(i, marked_cpus))
+               if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }
 
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
         */
        process_task_mortuary();
 
-       cpus_clear(marked_cpus);
+       cpumask_clear(marked_cpus);
 }
 
 
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
        mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+       if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_clear(marked_cpus);
+       return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+       free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
index 3110732..0ebf5db 100644 (file)
@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
index 3cffce9..ced39f6 100644 (file)
@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
        int err;
 
+       err = buffer_sync_init();
+       if (err)
+               return err;
+
        err = oprofile_arch_init(&oprofile_ops);
 
        if (err < 0 || timer) {
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
        }
 
        err = oprofilefs_register();
-       if (err)
+       if (err) {
                oprofile_arch_exit();
+               buffer_sync_cleanup();
+       }
 
        return err;
 }
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
        oprofilefs_unregister();
        oprofile_arch_exit();
+       buffer_sync_cleanup();
 }
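The oprofile hunks above pair buffer_sync_init() with buffer_sync_cleanup() on every exit and failure path. The shape is ordinary acquire-in-order, release-in-reverse unwinding; a reduced sketch (the real oprofile_init() also handles oprofile_arch_init(), omitted here):

static int __init oprofile_init_sketch(void)
{
        int err;

        err = buffer_sync_init();       /* first resource acquired */
        if (err)
                return err;

        err = oprofilefs_register();    /* second resource */
        if (err)
                goto out_sync;          /* unwind in reverse order */

        return 0;

out_sync:
        buffer_sync_cleanup();
        return err;
}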
 
 
index f78371b..5a57753 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
 
index eb0dfde..3141e14 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
@@ -75,7 +76,14 @@ enum {
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
+struct cpu_evtchn_s {
+       unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+       return cpu_evtchn_mask_p[cpu].bits;
+}
 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
 
 /* Reference counts for bindings to IRQs. */
@@ -115,7 +123,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
                                           unsigned int idx)
 {
        return (sh->evtchn_pending[idx] &
-               cpu_evtchn_mask[cpu][idx] &
+               cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
 }
 
@@ -125,11 +133,11 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
        BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-       irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-       __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-       __set_bit(chn, cpu_evtchn_mask[cpu]);
+       __clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+       __set_bit(chn, cpu_evtchn_mask(cpu));
 
        cpu_evtchn[chn] = cpu;
 }
@@ -142,12 +150,12 @@ static void init_evtchn_cpu_bindings(void)
 
        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
-               desc->affinity = cpumask_of_cpu(0);
+               cpumask_copy(desc->affinity, cpumask_of(0));
        }
 #endif
 
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-       memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
@@ -822,6 +830,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
        int i;
+       size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+       cpu_evtchn_mask_p = alloc_bootmem(size);
+       BUG_ON(cpu_evtchn_mask_p == NULL);
 
        init_evtchn_cpu_bindings();
 
index 9b91617..e7e83b6 100644 (file)
@@ -100,7 +100,7 @@ static void do_suspend(void)
        /* XXX use normal device tree? */
        xenbus_suspend();
 
-       err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+       err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
        if (err) {
                printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
                goto out;
index 9a3274a..937d7c4 100644 (file)
@@ -9,7 +9,7 @@
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
        int num = 0;
 
index be24465..a60a7cc 100644 (file)
@@ -9,7 +9,7 @@
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
        int num = BITS_PER_LONG - 1;
 
index 850859b..0576d1f 100644 (file)
@@ -9,7 +9,7 @@
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
        int r = 32;
 
index 86d403f..b097cf8 100644 (file)
@@ -15,7 +15,7 @@
  * at position 64.
  */
 #if BITS_PER_LONG == 32
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
        __u32 h = x >> 32;
        if (h)
@@ -23,7 +23,7 @@ static inline int fls64(__u64 x)
        return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
        if (x == 0)
                return 0;
index 9127f6b..472f117 100644 (file)
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
index f899b50..27a6753 100644 (file)
@@ -182,11 +182,11 @@ struct irq_desc {
        unsigned int            irqs_unhandled;
        spinlock_t              lock;
 #ifdef CONFIG_SMP
-       cpumask_t               affinity;
+       cpumask_var_t           affinity;
        unsigned int            cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_t               pending_mask;
+       cpumask_var_t           pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:      pointer to irq_desc struct
+ * @cpu:       cpu which will be handling the cpumasks
+ * @boot:      true if the bootmem allocator must be used
+ *
+ * Allocates the affinity and pending_mask cpumasks if required.
+ * Returns true on success (or if no allocation was required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       int node;
+
+       if (boot) {
+               alloc_bootmem_cpumask_var(&desc->affinity);
+               cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+               alloc_bootmem_cpumask_var(&desc->pending_mask);
+               cpumask_clear(desc->pending_mask);
+#endif
+               return true;
+       }
+
+       node = cpu_to_node(cpu);
+
+       if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+               return false;
+       cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+               free_cpumask_var(desc->affinity);
+               return false;
+       }
+       cpumask_clear(desc->pending_mask);
+#endif
+       return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:  pointer to old irq_desc struct
+ * @new_desc:  pointer to new irq_desc struct
+ *
+ * Ensures the affinity and pending_mask cpumasks are copied to the new
+ * irq_desc.  If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in
+ * the irq_desc struct, so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
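A hedged sketch of how the helpers above are meant to be used when an irq_desc is created after boot; this mirrors, in reduced form, what kernel/irq/handle.c and numa_migrate.c do below (alloc_desc_sketch is an invented name):

#include <linux/slab.h>

static struct irq_desc *alloc_desc_sketch(unsigned int irq, int cpu)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, cpu_to_node(cpu));
        if (!desc)
                return NULL;

        /* boot == false: slab is up, no bootmem allocation needed */
        if (!init_alloc_desc_masks(desc, cpu, false)) {
                kfree(desc);
                return NULL;
        }
        desc->irq = irq;
        return desc;
}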
index 86af92e..887477b 100644 (file)
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)                          \
        for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
index e632d29..a16b9e0 100644 (file)
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)            cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)           cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)             cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
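With the fallback definitions above, topology_core_cpumask() is safe to call on any architecture. A sketch of the common consumer pattern, counting distinct physical cores much as the sfc driver does earlier in this series (count_cores_sketch is illustrative):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

static int count_cores_sketch(void)
{
        cpumask_var_t seen;
        int cpu, count = 0;

        if (!alloc_cpumask_var(&seen, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(seen);
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, seen)) {
                        count++;        /* first CPU seen on this core */
                        cpumask_or(seen, seen, topology_core_cpumask(cpu));
                }
        }
        free_cpumask_var(seen);
        return count;
}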
index f63c706..c248eba 100644 (file)
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-       cpumask_setall(&desc->affinity);
+       cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+#endif
 #endif
        spin_unlock_irqrestore(&desc->lock, flags);
 }
index c20db0b..375d68c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -57,6 +58,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
@@ -64,9 +66,6 @@ static struct irq_desc irq_desc_init = {
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-       .affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
 }
 
@@ -109,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
@@ -119,14 +122,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
-#endif
        }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -134,18 +133,30 @@ int __init early_irq_init(void)
        int legacy_count;
        int i;
 
+       /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
+
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
 
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
 
        return arch_early_irq_init();
@@ -153,7 +164,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+
+       return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -162,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
 
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
 
@@ -207,9 +220,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
-#endif
        }
 };
 
@@ -219,12 +229,15 @@ int __init early_irq_init(void)
        int count;
        int i;
 
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
 
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                desc[i].irq = i;
-
+               init_alloc_desc_masks(&desc[i], 0, true);
+       }
        return arch_early_irq_init();
 }
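Since irq_to_desc() can now return NULL (out-of-range irq, or called before irq_desc_ptrs is allocated), every caller must check the result. A representative hedged usage (example_irq_op is invented):

static int example_irq_op(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)      /* not allocated yet, or irq >= nr_irqs */
                return -EINVAL;

        /* ... operate on desc, normally under desc->lock ... */
        return 0;
}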
 
index e6d0a43..40416a8 100644 (file)
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
index cd0cd8d..b98739a 100644 (file)
@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
 
        return 0;
 }
index bd72329..e05ad9b 100644 (file)
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
        desc->status &= ~IRQ_MOVE_PENDING;
 
-       if (unlikely(cpumask_empty(&desc->pending_mask)))
+       if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
        if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
-               cpumask_and(&desc->affinity,
-                           &desc->pending_mask, cpu_online_mask);
-               desc->chip->set_affinity(irq, &desc->affinity);
+               cpumask_and(desc->affinity,
+                           desc->pending_mask, cpu_online_mask);
+               desc->chip->set_affinity(irq, desc->affinity);
        }
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
index ecf765c..666260e 100644 (file)
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                 struct irq_desc *desc, int cpu)
 {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
        arch_init_copy_chip_data(old_desc, desc, cpu);
+       return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
-               printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                /* still use old one */
+               kfree(desc);
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
        irq_desc_ptrs[irq] = desc;
 
index aae3f74..692363d 100644 (file)
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = &desc->affinity;
+       const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PENDING)
-               mask = &desc->pending_mask;
+               mask = desc->pending_mask;
 #endif
        seq_cpumask(m, mask);
        seq_putc(m, '\n');
index 954e1a8..da932f4 100644 (file)
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+                                  const struct cpumask *mask)
 {
        int first;
 
        /* "this_cpu" is cheaper to preempt than a remote processor */
-       if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+       if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
                return this_cpu;
 
-       first = first_cpu(*mask);
-       if (first != NR_CPUS)
+       first = cpumask_first(mask);
+       if (first < nr_cpu_ids)
                return first;
 
        return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
+       cpumask_var_t domain_mask;
 
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */
 
-       for_each_domain(cpu, sd) {
-               if (sd->flags & SD_WAKE_AFFINE) {
-                       cpumask_t domain_mask;
-                       int       best_cpu;
+       if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+               for_each_domain(cpu, sd) {
+                       if (sd->flags & SD_WAKE_AFFINE) {
+                               int best_cpu;
 
-                       cpumask_and(&domain_mask, sched_domain_span(sd),
-                                   lowest_mask);
+                               cpumask_and(domain_mask,
+                                           sched_domain_span(sd),
+                                           lowest_mask);
 
-                       best_cpu = pick_optimal_cpu(this_cpu,
-                                                   &domain_mask);
-                       if (best_cpu != -1)
-                               return best_cpu;
+                               best_cpu = pick_optimal_cpu(this_cpu,
+                                                           domain_mask);
+
+                               if (best_cpu != -1) {
+                                       free_cpumask_var(domain_mask);
+                                       return best_cpu;
+                               }
+                       }
                }
+               free_cpumask_var(domain_mask);
        }
 
        /*
index bdbe9de..0365b48 100644 (file)
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
        return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+       return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
        return 0;
index 0f8fc22..4689cb0 100644 (file)
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
-       if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+       if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
                goto out;
 
        /*