/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/suspend.h>

#include "smpboot.h"
enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

/* CPU mask indicating which CPUs to bring online during smp_init() */
static bool have_boot_cpu_mask;
static cpumask_var_t boot_cpu_mask;
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Fall-through to the CPU_DEAD[_FROZEN] case. */

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The IPIs for the smp-call-function callbacks queued by other
		 * CPUs might arrive late, either due to hardware latencies or
		 * because this CPU disabled interrupts (inside stop-machine)
		 * before the IPIs were sent. So flush out any pending callbacks
		 * explicitly (without waiting for the IPIs to arrive), to
		 * ensure that the outgoing CPU doesn't go offline with work
		 * still pending.
		 */
		flush_smp_call_function_queue(false);
		break;
#endif
	}

	return NOTIFY_OK;
}
static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (smp_load_acquire(&csd->flags) & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}
static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
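/*
 * A minimal sketch of the csd lifecycle under this locking protocol (not
 * part of the original file; "f" and "p" are hypothetical). The release in
 * csd_unlock() pairs with the acquire in csd_lock_wait(), so a waiter
 * cannot return before the callback's effects are visible:
 *
 *	csd_lock(csd);			// spin, then set CSD_FLAG_LOCK
 *	csd->func = f;			// ordered after ->flags by smp_wmb()
 *	csd->info = p;
 *	...queue csd and send the IPI...
 *	// remote CPU runs f(p), then csd_unlock(csd) clears CSD_FLAG_LOCK
 *	csd_lock_wait(csd);		// acquire pairs with that release
 */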
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, relying on the
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 *
	 * llist_add() returns true only when the queue was previously
	 * empty, so only the first addition needs to kick the remote CPU.
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			/* Run first: the sender spins until we unlock. */
			func(info);
			csd_unlock(csd);
		} else {
			/* Unlock first: the csd may be reused by func(). */
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}
/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* Use the on-stack csd when waiting; otherwise the per-cpu one. */
	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
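/*
 * Usage sketch (illustrative only, not from the original file; the callback
 * and target CPU are hypothetical): run a fast, non-blocking function on
 * CPU 1 and wait for it, from preemptible context with IRQs enabled:
 *
 *	static void read_cpu_id(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	int id = -1;
 *	int err = smp_call_function_single(1, read_cpu_id, &id, 1);
 *	// on success, id == 1
 */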
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
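/*
 * Sketch of the intended usage (illustrative; struct my_dev and my_dev_poke
 * are hypothetical): the csd is embedded in a longer-lived object, and the
 * caller must not reissue it until the previous invocation has run, per the
 * serialization rule documented above:
 *
 *	struct my_dev {
 *		struct call_single_data csd;
 *		int target_cpu;
 *	};
 *
 *	dev->csd.func = my_dev_poke;
 *	dev->csd.info = dev;
 *	smp_call_function_single_async(dev->target_cpu, &dev->csd);
 */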
/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
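/*
 * Sketch (illustrative; do_flush and nid are hypothetical): run a callback
 * on the cheapest suitable CPU of one NUMA node, preferring the caller's
 * own CPU per the selection order documented above, and wait for it:
 *
 *	int err = smp_call_function_any(cpumask_of_node(nid), do_flush,
 *					NULL, 1);
 */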
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
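/*
 * Sketch (illustrative; dev, dev->cpus and drain_queues are hypothetical):
 * IPI every other online CPU in a caller-owned mask with preemption
 * disabled, and wait for all callbacks to finish:
 *
 *	get_cpu();			// disables preemption
 *	smp_call_function_many(dev->cpus, drain_queues, dev, true);
 *	put_cpu();
 */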
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
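/*
 * Sketch (illustrative; flush_local_cache is hypothetical): since this only
 * targets *other* CPUs, the caller handles its own CPU separately:
 *
 *	smp_call_function(flush_local_cache, NULL, 1);
 *	flush_local_cache(NULL);	// this CPU was not included above
 *
 * on_each_cpu() below packages exactly this pattern.
 */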
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);
/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
static int __init boot_cpus(char *str)
{
	alloc_bootmem_cpumask_var(&boot_cpu_mask);
	if (cpulist_parse(str, boot_cpu_mask) < 0) {
		pr_warn("SMP: Incorrect boot_cpus cpumask\n");
		return -EINVAL;
	}
	have_boot_cpu_mask = true;
	return 0;
}

early_param("boot_cpus", boot_cpus);
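/*
 * For example (illustrative values), booting with "boot_cpus=0-3" makes
 * smp_init() bring up only CPUs 0-3; any other present CPUs stay offline
 * until onlined later (e.g. via sysfs).
 */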
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}
/* Should the given CPU be booted during smp_init() ? */
static inline bool boot_cpu(int cpu)
{
	if (!have_boot_cpu_mask)
		return true;

	return cpumask_test_cpu(cpu, boot_cpu_mask);
}
static inline void free_boot_cpu_mask(void)
{
	if (have_boot_cpu_mask)	/* Allocated from boot_cpus() */
		free_bootmem_cpumask_var(boot_cpu_mask);
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu) && boot_cpu(cpu))
			cpu_up(cpu);
	}

	free_boot_cpu_mask();

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
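/*
 * Sketch (illustrative; flush_tlb_func and its argument are hypothetical):
 *
 *	on_each_cpu_mask(mm_cpumask(mm), flush_tlb_func, &fi, true);
 *
 * runs the callback on every online CPU in mm's cpumask, including the
 * caller (locally, with IRQs disabled), and waits for all of them.
 */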
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
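/*
 * Sketch (illustrative; my_queue and drain_queue are hypothetical): IPI only
 * those CPUs whose per-cpu queue is non-empty:
 *
 *	static bool queue_nonempty(int cpu, void *info)
 *	{
 *		return !llist_empty(&per_cpu(my_queue, cpu));
 *	}
 *
 *	on_each_cpu_cond(queue_nonempty, drain_queue, NULL, true, GFP_KERNEL);
 */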
static void do_nothing(void *unused)
{
}
/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
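/*
 * Sketch (illustrative; idle_hook/new_hook are hypothetical): after swapping
 * a pointer consulted on the idle path, force every CPU through the IPI
 * handler so none can still be running the old target:
 *
 *	rcu_assign_pointer(idle_hook, new_hook);
 *	kick_all_cpus_sync();
 *	// the old hook is no longer in use anywhere; safe to free
 */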
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Breaks all cpus that are in an idle state out of idle, including cpus
 * that are idle-polling. Nothing is done for cpus that are not idle.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		/* Leave isolated cpus alone unless we're entering freeze. */
		if (suspend_freeze_state == FREEZE_STATE_ENTER ||
		    !cpu_isolated(cpu))
			wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);