/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
/*
 * as from 2.5, kernels no longer have an "init_tasks" structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
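/*
 * Each IPI type is a single bit in ipi->bits: the sender sets the bit
 * under ipi->lock and raises one hardware cross-call, while do_IPI()
 * snapshots and clears the whole word, so back-to-back messages may be
 * serviced by a single interrupt.
 */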
static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr, prot;
	pmd_t *pmd;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	for (addr = start & PGDIR_MASK; addr < end;) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		flush_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}
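/*
 * Each pgd entry covers 2MB here and maps onto a pair of 1MB section
 * entries, which is why the loop above writes both pmd[0] and pmd[1]
 * and advances addr by SECTION_SIZE twice per iteration.
 */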
static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(0);
		pmd[1] = __pmd(0);
		clean_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}
	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
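	/*
	 * The secondary core starts with its MMU and caches disabled, so
	 * secondary_data must be cleaned out of the L1 cache and the
	 * outer cache (above) for the new CPU to read valid values from
	 * RAM.
	 */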
	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}

	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
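/*
 * boot_secondary() is provided by platform code and typically releases
 * the secondary core from a holding pen or writes a boot mailbox; the
 * generic code above only prepares secondary_data and then polls
 * cpu_online() for up to one second (HZ jiffies).
 */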
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}
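/*
 * From this point the dying CPU is no longer visible to the scheduler
 * or the IRQ subsystem; the generic hotplug code has the requesting
 * CPU call __cpu_die() below while the offlined CPU itself reaches
 * cpu_die() from its idle loop.
 */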
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
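	/*
	 * This CPU is now running on the shared init_mm page tables
	 * switched to above; the temporary 1:1 mapping created by
	 * __cpu_up() is no longer used from here on.
	 */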
	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
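/*
 * BogoMIPS is loops_per_jiffy * HZ / 500000, so bogosum / (500000/HZ)
 * above yields the integer part and (bogosum / (5000/HZ)) % 100 the
 * two-digit fraction.
 */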
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}
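/*
 * Interrupts are disabled across send_ipi_message() so that queueing
 * the message bits and raising the hardware cross-call cannot be
 * interleaved with an incoming IPI being handled on this CPU.
 */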
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}
/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
#endif
void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	local_timer_setup(evt);
}
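/*
 * Without CONFIG_LOCAL_TIMERS, the local_timer_setup() above registers
 * the "dummy_timer" clockevent and ticks are delivered instead via
 * IPI_TIMER broadcasts sent through smp_timer_broadcast().
 */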
static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/*
			 * Isolate the lowest set bit, clear it from the
			 * pending set, and convert it to a message number.
			 */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_ipi_message(&mask, IPI_CPU_STOP);
}
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
		const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}
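/*
 * Unlike on_each_cpu(), this helper invokes func locally only when the
 * current CPU is in the mask; preemption is disabled so the
 * smp_processor_id() test and the local call happen on the same CPU.
 */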
/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
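/*
 * The cross-CPU flush callbacks below receive a single void * argument,
 * so tlb_args packs the vma and the address range required by the
 * various local_flush_tlb_*() operations into one structure.
 */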
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}