2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licensed under the GPL.
6 #include <linux/proc_fs.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/unistd.h>
12 #include <linux/cpu.h>
13 #include <linux/oom.h>
14 #include <linux/rcupdate.h>
15 #include <linux/export.h>
16 #include <linux/bug.h>
17 #include <linux/kthread.h>
18 #include <linux/stop_machine.h>
19 #include <linux/mutex.h>
20 #include <linux/gfp.h>
21 #include <linux/suspend.h>
22 #include <linux/lockdep.h>
23 #include <linux/tick.h>
24 #include <linux/irq.h>
25 #include <linux/smpboot.h>
26 #include <linux/relay.h>
27 #include <linux/slab.h>
29 #include <trace/events/power.h>
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/cpuhp.h>
36 * cpuhp_cpu_state - Per cpu hotplug state storage
37 * @state: The current cpu state
38 * @target: The target state
39 * @thread: Pointer to the hotplug thread
40 * @should_run: Thread should execute
41 * @rollback: Perform a rollback
42 * @single: Single callback invocation
43 * @bringup: Single callback bringup or teardown selector
44 * @cb_state: The state for a single callback (install/uninstall)
45 * @result: Result of the operation
46 * @done: Signal completion to the issuer of the task
48 struct cpuhp_cpu_state {
49 enum cpuhp_state state;
50 enum cpuhp_state target;
52 struct task_struct *thread;
58 struct hlist_node *node;
59 enum cpuhp_state cb_state;
61 struct completion done;
65 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
67 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
68 static struct lock_class_key cpuhp_state_key;
69 static struct lockdep_map cpuhp_state_lock_map =
70 STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
74 * cpuhp_step - Hotplug state machine step
75 * @name: Name of the step
76 * @startup: Startup function of the step
77 * @teardown: Teardown function of the step
78 * @skip_onerr: Do not invoke the functions on error rollback
79 * Will go away once the notifiers are gone
80 * @cant_stop: Bringup/teardown can't be stopped at this step
85 int (*single)(unsigned int cpu);
86 int (*multi)(unsigned int cpu,
87 struct hlist_node *node);
90 int (*single)(unsigned int cpu);
91 int (*multi)(unsigned int cpu,
92 struct hlist_node *node);
94 struct hlist_head list;
100 static DEFINE_MUTEX(cpuhp_state_mutex);
101 static struct cpuhp_step cpuhp_bp_states[];
102 static struct cpuhp_step cpuhp_ap_states[];
104 static bool cpuhp_is_ap_state(enum cpuhp_state state)
107 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
108 * purposes as that state is handled explicitly in cpu_down.
110 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
113 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
115 struct cpuhp_step *sp;
117 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
122 * cpuhp_invoke_callback - Invoke the callbacks for a given state
123 * @cpu: The cpu for which the callback should be invoked
124 * @step: The step in the state machine
125 * @bringup: True if the bringup callback should be invoked
127 * Called from cpu hotplug and from the state register machinery.
129 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
130 bool bringup, struct hlist_node *node)
132 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
133 struct cpuhp_step *step = cpuhp_get_step(state);
134 int (*cbm)(unsigned int cpu, struct hlist_node *node);
135 int (*cb)(unsigned int cpu);
138 if (!step->multi_instance) {
139 cb = bringup ? step->startup.single : step->teardown.single;
142 trace_cpuhp_enter(cpu, st->target, state, cb);
144 trace_cpuhp_exit(cpu, st->state, state, ret);
147 cbm = bringup ? step->startup.multi : step->teardown.multi;
151 /* Single invocation for instance add/remove */
153 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
154 ret = cbm(cpu, node);
155 trace_cpuhp_exit(cpu, st->state, state, ret);
159 /* State transition. Invoke on all instances */
161 hlist_for_each(node, &step->list) {
162 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
163 ret = cbm(cpu, node);
164 trace_cpuhp_exit(cpu, st->state, state, ret);
171 /* Rollback the instances if one failed */
172 cbm = !bringup ? step->startup.multi : step->teardown.multi;
176 hlist_for_each(node, &step->list) {
185 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
186 static DEFINE_MUTEX(cpu_add_remove_lock);
187 bool cpuhp_tasks_frozen;
188 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
191 * The following two APIs (cpu_maps_update_begin/done) must be used when
192 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
193 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
194 * hotplug callback (un)registration performed using __register_cpu_notifier()
195 * or __unregister_cpu_notifier().
197 void cpu_maps_update_begin(void)
199 mutex_lock(&cpu_add_remove_lock);
201 EXPORT_SYMBOL(cpu_notifier_register_begin);
203 void cpu_maps_update_done(void)
205 mutex_unlock(&cpu_add_remove_lock);
207 EXPORT_SYMBOL(cpu_notifier_register_done);
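/*
 * Illustrative sketch (not part of this file, compiled out): the
 * registration protocol described above, as a subsystem would use it.
 * The "foo" names, foo_cpu_callback() and foo_prepare_cpu() are
 * hypothetical.
 */
#if 0
static struct notifier_block foo_cpu_notifier = {
	.notifier_call	= foo_cpu_callback,
};

static int __init foo_init(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();

	/* Initialize the CPUs which are already online ... */
	for_each_online_cpu(cpu)
		foo_prepare_cpu(cpu);

	/* ... and register the notifier under the same lock, race free. */
	__register_cpu_notifier(&foo_cpu_notifier);

	cpu_notifier_register_done();
	return 0;
}
#endif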
209 static RAW_NOTIFIER_HEAD(cpu_chain);
211 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
212 * Should always be manipulated under cpu_add_remove_lock
214 static int cpu_hotplug_disabled;
216 #ifdef CONFIG_HOTPLUG_CPU
219 struct task_struct *active_writer;
220 /* wait queue to wake up the active_writer */
221 wait_queue_head_t wq;
222 /* verifies that no writer will get active while readers are active */
225 * Also blocks the new readers during
226 * an ongoing cpu hotplug operation.
230 #ifdef CONFIG_DEBUG_LOCK_ALLOC
231 struct lockdep_map dep_map;
234 .active_writer = NULL,
235 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
236 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
237 #ifdef CONFIG_DEBUG_LOCK_ALLOC
238 .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
242 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
243 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
244 #define cpuhp_lock_acquire_tryread() \
245 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
246 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
247 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
250 void get_online_cpus(void)
253 if (cpu_hotplug.active_writer == current)
255 cpuhp_lock_acquire_read();
256 mutex_lock(&cpu_hotplug.lock);
257 atomic_inc(&cpu_hotplug.refcount);
258 mutex_unlock(&cpu_hotplug.lock);
260 EXPORT_SYMBOL_GPL(get_online_cpus);
262 void put_online_cpus(void)
266 if (cpu_hotplug.active_writer == current)
269 refcount = atomic_dec_return(&cpu_hotplug.refcount);
270 if (WARN_ON(refcount < 0)) /* try to fix things up */
271 atomic_inc(&cpu_hotplug.refcount);
273 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
274 wake_up(&cpu_hotplug.wq);
276 cpuhp_lock_release();
279 EXPORT_SYMBOL_GPL(put_online_cpus);
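/*
 * Illustrative sketch (not part of this file, compiled out): the typical
 * reader side pairing of get_online_cpus()/put_online_cpus(). Code which
 * walks cpu_online_mask and must not race with a CPU going away wraps the
 * walk like this; foo_do_something() is a hypothetical helper.
 */
#if 0
static void foo_for_each_online_cpu(void)
{
	unsigned int cpu;

	get_online_cpus();		/* holds off cpu_hotplug_begin() */
	for_each_online_cpu(cpu)
		foo_do_something(cpu);
	put_online_cpus();		/* wakes a waiting writer, if any */
}
#endif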
282 * This ensures that the hotplug operation can begin only when the
283 * refcount goes to zero.
285 * Note that during a cpu-hotplug operation, the new readers, if any,
286 * will be blocked by the cpu_hotplug.lock
288 * Since cpu_hotplug_begin() is always called after invoking
289 * cpu_maps_update_begin(), we can be sure that only one writer is active.
291 * Note that theoretically, there is a possibility of a livelock:
292 * - Refcount goes to zero, last reader wakes up the sleeping writer.
294 * - Last reader unlocks the cpu_hotplug.lock.
295 * - A new reader arrives at this moment, bumps up the refcount.
296 * - The writer acquires the cpu_hotplug.lock, finds the refcount
297 * non-zero and goes to sleep again.
299 * However, this is very difficult to achieve in practice since
300 * get_online_cpus() is not an API which is called all that often.
303 void cpu_hotplug_begin(void)
307 cpu_hotplug.active_writer = current;
308 cpuhp_lock_acquire();
311 mutex_lock(&cpu_hotplug.lock);
312 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
313 if (likely(!atomic_read(&cpu_hotplug.refcount)))
315 mutex_unlock(&cpu_hotplug.lock);
318 finish_wait(&cpu_hotplug.wq, &wait);
321 void cpu_hotplug_done(void)
323 cpu_hotplug.active_writer = NULL;
324 mutex_unlock(&cpu_hotplug.lock);
325 cpuhp_lock_release();
329 * Wait for currently running CPU hotplug operations to complete (if any) and
330 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
331 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
332 * hotplug path before performing hotplug operations. So acquiring that lock
333 * guarantees mutual exclusion from any currently running hotplug operations.
335 void cpu_hotplug_disable(void)
337 cpu_maps_update_begin();
338 cpu_hotplug_disabled++;
339 cpu_maps_update_done();
341 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
343 static void __cpu_hotplug_enable(void)
345 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
347 cpu_hotplug_disabled--;
350 void cpu_hotplug_enable(void)
352 cpu_maps_update_begin();
353 __cpu_hotplug_enable();
354 cpu_maps_update_done();
356 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
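/*
 * Illustrative sketch (not part of this file, compiled out): unlike
 * get_online_cpus(), the disable/enable pair above does not block; it
 * makes cpu_up()/cpu_down() fail with -EBUSY until hotplug is enabled
 * again. A caller which cannot tolerate hotplug across a longer section
 * pairs the calls like this; foo_critical_operation() is hypothetical.
 */
#if 0
static void foo_no_hotplug_section(void)
{
	cpu_hotplug_disable();
	foo_critical_operation();	/* must not race with CPU hotplug */
	cpu_hotplug_enable();
}
#endif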
357 #endif /* CONFIG_HOTPLUG_CPU */
359 #ifdef CONFIG_HOTPLUG_SMT
360 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
361 EXPORT_SYMBOL_GPL(cpu_smt_control);
363 void __init cpu_smt_disable(bool force)
365 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
366 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
370 pr_info("SMT: Force disabled\n");
371 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
373 cpu_smt_control = CPU_SMT_DISABLED;
378 * The decision whether SMT is supported can only be done after the full
379 * CPU identification. Called from architecture code.
381 void __init cpu_smt_check_topology(void)
383 if (!topology_smt_supported())
384 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
387 static int __init smt_cmdline_disable(char *str)
389 cpu_smt_disable(str && !strcmp(str, "force"));
392 early_param("nosmt", smt_cmdline_disable);
394 static inline bool cpu_smt_allowed(unsigned int cpu)
396 if (cpu_smt_control == CPU_SMT_ENABLED)
399 if (topology_is_primary_thread(cpu))
403 * On x86 it's required to boot all logical CPUs at least once so
404 * that the init code can get a chance to set CR4.MCE on each
405 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
406 * core will shut down the machine.
408 return !per_cpu(cpuhp_state, cpu).booted_once;
411 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
414 /* Need to know about CPUs going up/down? */
415 int register_cpu_notifier(struct notifier_block *nb)
418 cpu_maps_update_begin();
419 ret = raw_notifier_chain_register(&cpu_chain, nb);
420 cpu_maps_update_done();
424 int __register_cpu_notifier(struct notifier_block *nb)
426 return raw_notifier_chain_register(&cpu_chain, nb);
429 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
432 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
433 void *hcpu = (void *)(long)cpu;
437 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
440 return notifier_to_errno(ret);
443 static int cpu_notify(unsigned long val, unsigned int cpu)
445 return __cpu_notify(val, cpu, -1, NULL);
448 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
450 BUG_ON(cpu_notify(val, cpu));
453 /* Notifier wrappers for transitioning to state machine */
454 static int notify_prepare(unsigned int cpu)
459 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
462 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
464 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
469 static int notify_online(unsigned int cpu)
471 cpu_notify(CPU_ONLINE, cpu);
475 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
477 static int bringup_wait_for_ap(unsigned int cpu)
479 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
481 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
482 wait_for_completion(&st->done);
483 if (WARN_ON_ONCE((!cpu_online(cpu))))
486 /* Unpark the stopper thread and the hotplug thread of the target cpu */
487 stop_machine_unpark(cpu);
488 kthread_unpark(st->thread);
491 * SMT soft disabling on X86 requires bringing the CPU out of the
492 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
493 * CPU marked itself as booted_once in notify_cpu_starting() so the
494 * cpu_smt_allowed() check will now return false if this is not the primary sibling.
497 if (!cpu_smt_allowed(cpu))
500 /* Should we go further up ? */
501 if (st->target > CPUHP_AP_ONLINE_IDLE) {
502 __cpuhp_kick_ap_work(st);
503 wait_for_completion(&st->done);
508 static int bringup_cpu(unsigned int cpu)
510 struct task_struct *idle = idle_thread_get(cpu);
514 * Some architectures have to walk the irq descriptors to
515 * setup the vector space for the cpu which comes online.
516 * Prevent irq alloc/free across the bringup.
520 /* Arch-specific enabling code. */
521 ret = __cpu_up(cpu, idle);
524 cpu_notify(CPU_UP_CANCELED, cpu);
527 return bringup_wait_for_ap(cpu);
531 * Hotplug state machine related functions
533 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
535 for (st->state++; st->state < st->target; st->state++) {
536 struct cpuhp_step *step = cpuhp_get_step(st->state);
538 if (!step->skip_onerr)
539 cpuhp_invoke_callback(cpu, st->state, true, NULL);
543 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
544 enum cpuhp_state target)
546 enum cpuhp_state prev_state = st->state;
549 for (; st->state > target; st->state--) {
550 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
552 st->target = prev_state;
553 undo_cpu_down(cpu, st);
560 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
562 for (st->state--; st->state > st->target; st->state--) {
563 struct cpuhp_step *step = cpuhp_get_step(st->state);
565 if (!step->skip_onerr)
566 cpuhp_invoke_callback(cpu, st->state, false, NULL);
570 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
571 enum cpuhp_state target)
573 enum cpuhp_state prev_state = st->state;
576 while (st->state < target) {
578 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
580 st->target = prev_state;
581 undo_cpu_up(cpu, st);
589 * The cpu hotplug threads manage the bringup and teardown of the cpus
591 static void cpuhp_create(unsigned int cpu)
593 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
595 init_completion(&st->done);
598 static int cpuhp_should_run(unsigned int cpu)
600 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
602 return st->should_run;
605 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
606 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
608 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
610 return cpuhp_down_callbacks(cpu, st, target);
613 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
614 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
616 return cpuhp_up_callbacks(cpu, st, st->target);
620 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
621 * callbacks when a state gets [un]installed at runtime.
623 static void cpuhp_thread_fun(unsigned int cpu)
625 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
629 * Paired with the mb() in cpuhp_kick_ap_work and
630 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
636 st->should_run = false;
638 lock_map_acquire(&cpuhp_state_lock_map);
639 /* Single callback invocation for [un]install ? */
641 if (st->cb_state < CPUHP_AP_ONLINE) {
643 ret = cpuhp_invoke_callback(cpu, st->cb_state,
644 st->bringup, st->node);
647 ret = cpuhp_invoke_callback(cpu, st->cb_state,
648 st->bringup, st->node);
650 } else if (st->rollback) {
651 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
653 undo_cpu_down(cpu, st);
655 * This is a momentary workaround to keep the notifier users
656 * happy. Will go away once we get rid of the notifiers.
658 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
659 st->rollback = false;
661 /* Cannot happen .... */
662 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
664 /* Regular hotplug work */
665 if (st->state < st->target)
666 ret = cpuhp_ap_online(cpu, st);
667 else if (st->state > st->target)
668 ret = cpuhp_ap_offline(cpu, st);
670 lock_map_release(&cpuhp_state_lock_map);
675 /* Invoke a single callback on a remote cpu */
677 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
678 struct hlist_node *node)
680 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
682 if (!cpu_online(cpu))
685 lock_map_acquire(&cpuhp_state_lock_map);
686 lock_map_release(&cpuhp_state_lock_map);
689 * If we are up and running, use the hotplug thread. For early calls
690 * we invoke the thread function directly.
693 return cpuhp_invoke_callback(cpu, state, bringup, node);
695 st->cb_state = state;
697 st->bringup = bringup;
701 * Make sure the above stores are visible before should_run becomes
702 * true. Paired with the mb() above in cpuhp_thread_fun()
705 st->should_run = true;
706 wake_up_process(st->thread);
707 wait_for_completion(&st->done);
711 /* Regular hotplug invocation of the AP hotplug thread */
712 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
717 * Make sure the above stores are visible before should_run becomes
718 * true. Paired with the mb() above in cpuhp_thread_fun()
721 st->should_run = true;
722 wake_up_process(st->thread);
725 static int cpuhp_kick_ap_work(unsigned int cpu)
727 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
728 enum cpuhp_state state = st->state;
730 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
731 lock_map_acquire(&cpuhp_state_lock_map);
732 lock_map_release(&cpuhp_state_lock_map);
733 __cpuhp_kick_ap_work(st);
734 wait_for_completion(&st->done);
735 trace_cpuhp_exit(cpu, st->state, state, st->result);
739 static struct smp_hotplug_thread cpuhp_threads = {
740 .store = &cpuhp_state.thread,
741 .create = &cpuhp_create,
742 .thread_should_run = cpuhp_should_run,
743 .thread_fn = cpuhp_thread_fun,
744 .thread_comm = "cpuhp/%u",
748 void __init cpuhp_threads_init(void)
750 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
751 kthread_unpark(this_cpu_read(cpuhp_state.thread));
754 EXPORT_SYMBOL(register_cpu_notifier);
755 EXPORT_SYMBOL(__register_cpu_notifier);
756 void unregister_cpu_notifier(struct notifier_block *nb)
758 cpu_maps_update_begin();
759 raw_notifier_chain_unregister(&cpu_chain, nb);
760 cpu_maps_update_done();
762 EXPORT_SYMBOL(unregister_cpu_notifier);
764 void __unregister_cpu_notifier(struct notifier_block *nb)
766 raw_notifier_chain_unregister(&cpu_chain, nb);
768 EXPORT_SYMBOL(__unregister_cpu_notifier);
770 #ifdef CONFIG_HOTPLUG_CPU
772 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
775 * This function walks all processes, finds a valid mm struct for each one and
776 * then clears a corresponding bit in mm's cpumask. While this all sounds
777 * trivial, there are various non-obvious corner cases, which this function
778 * tries to solve in a safe manner.
780 * Also note that the function uses a somewhat relaxed locking scheme, so it may
781 * be called only for an already offlined CPU.
783 void clear_tasks_mm_cpumask(int cpu)
785 struct task_struct *p;
788 * This function is called after the cpu is taken down and marked
789 * offline, so it's not like new tasks will ever get this cpu set in
790 * their mm mask. -- Peter Zijlstra
791 * Thus, we may use rcu_read_lock() here, instead of grabbing
792 * full-fledged tasklist_lock.
794 WARN_ON(cpu_online(cpu));
796 for_each_process(p) {
797 struct task_struct *t;
800 * Main thread might exit, but other threads may still have
801 * a valid mm. Find one.
803 t = find_lock_task_mm(p);
806 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
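/*
 * Illustrative sketch (not part of this file, compiled out): an
 * architecture's __cpu_disable() typically marks the CPU offline,
 * migrates its interrupts away and only then clears the stale
 * mm_cpumask bits, which satisfies the "already offlined" requirement
 * documented above. The details are arch specific and hypothetical here.
 */
#if 0
int __cpu_disable_example(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);	/* no longer visible as online */
	/* ... migrate IRQs away, arch specific cache/TLB teardown ... */
	clear_tasks_mm_cpumask(cpu);	/* safe: CPU is marked offline */
	return 0;
}
#endif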
812 static inline void check_for_tasks(int dead_cpu)
814 struct task_struct *g, *p;
816 read_lock(&tasklist_lock);
817 for_each_process_thread(g, p) {
821 * We do the check with unlocked task_rq(p)->lock.
822 * Order the reading so that we do not warn about a task
823 * which was running on this cpu in the past and
824 * has just been woken on another cpu.
827 if (task_cpu(p) != dead_cpu)
830 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
831 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
833 read_unlock(&tasklist_lock);
836 static int notify_down_prepare(unsigned int cpu)
838 int err, nr_calls = 0;
840 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
843 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
844 pr_warn("%s: attempt to take down CPU %u failed\n",
850 /* Take this CPU down. */
851 static int take_cpu_down(void *_param)
853 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
854 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
855 int err, cpu = smp_processor_id();
857 /* Ensure this CPU doesn't handle any more interrupts. */
858 err = __cpu_disable();
863 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
864 * do this step again.
866 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
868 /* Invoke the former CPU_DYING callbacks */
869 for (; st->state > target; st->state--)
870 cpuhp_invoke_callback(cpu, st->state, false, NULL);
872 /* Give up timekeeping duties */
873 tick_handover_do_timer();
874 /* Park the stopper thread */
875 stop_machine_park(cpu);
879 static int takedown_cpu(unsigned int cpu)
881 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
884 /* Park the smpboot threads */
885 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
888 * Prevent irq alloc/free while the dying cpu reorganizes the
889 * interrupt affinities.
894 * So now all preempt/rcu users must observe !cpu_active().
896 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
898 /* CPU refused to die */
900 /* Unpark the hotplug thread so we can rollback there */
901 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
904 BUG_ON(cpu_online(cpu));
907 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
908 * runnable tasks from the cpu, there's only the idle task left now
909 * that the migration thread is done doing the stop_machine thing.
911 * Wait for the stop thread to go away.
913 wait_for_completion(&st->done);
914 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
916 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
919 hotplug_cpu__broadcast_tick_pull(cpu);
920 /* This actually kills the CPU. */
923 tick_cleanup_dead_cpu(cpu);
927 static int notify_dead(unsigned int cpu)
929 cpu_notify_nofail(CPU_DEAD, cpu);
930 check_for_tasks(cpu);
934 static void cpuhp_complete_idle_dead(void *arg)
936 struct cpuhp_cpu_state *st = arg;
941 void cpuhp_report_idle_dead(void)
943 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
945 BUG_ON(st->state != CPUHP_AP_OFFLINE);
946 rcu_report_dead(smp_processor_id());
947 st->state = CPUHP_AP_IDLE_DEAD;
949 * We cannot call complete after rcu_report_dead() so we delegate it to an online cpu.
952 smp_call_function_single(cpumask_first(cpu_online_mask),
953 cpuhp_complete_idle_dead, st, 0);
957 #define notify_down_prepare NULL
958 #define takedown_cpu NULL
959 #define notify_dead NULL
962 #ifdef CONFIG_HOTPLUG_CPU
964 /* Requires cpu_add_remove_lock to be held */
965 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
966 enum cpuhp_state target)
968 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
969 int prev_state, ret = 0;
970 bool hasdied = false;
972 if (num_online_cpus() == 1)
975 if (!cpu_present(cpu))
980 cpuhp_tasks_frozen = tasks_frozen;
982 prev_state = st->state;
985 * If the current CPU state is in the range of the AP hotplug thread,
986 * then we need to kick the thread.
988 if (st->state > CPUHP_TEARDOWN_CPU) {
989 ret = cpuhp_kick_ap_work(cpu);
991 * The AP side has done the error rollback already. Just
992 * return the error code..
998 * We might have stopped still in the range of the AP hotplug
999 * thread. Nothing to do anymore.
1001 if (st->state > CPUHP_TEARDOWN_CPU)
1005 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1006 * to do the further cleanups.
1008 ret = cpuhp_down_callbacks(cpu, st, target);
1009 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1010 st->target = prev_state;
1011 st->rollback = true;
1012 cpuhp_kick_ap_work(cpu);
1015 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1018 /* This post dead nonsense must die */
1019 if (!ret && hasdied)
1020 cpu_notify_nofail(CPU_POST_DEAD, cpu);
1024 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1026 if (cpu_hotplug_disabled)
1028 return _cpu_down(cpu, 0, target);
1031 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1035 cpu_maps_update_begin();
1036 err = cpu_down_maps_locked(cpu, target);
1037 cpu_maps_update_done();
1040 int cpu_down(unsigned int cpu)
1042 return do_cpu_down(cpu, CPUHP_OFFLINE);
1044 EXPORT_SYMBOL(cpu_down);
1045 #endif /*CONFIG_HOTPLUG_CPU*/
1048 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1049 * @cpu: cpu that just started
1051 * It must be called by the arch code on the new cpu, before the new cpu
1052 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1054 void notify_cpu_starting(unsigned int cpu)
1056 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1057 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1059 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1060 st->booted_once = true;
1061 while (st->state < target) {
1063 cpuhp_invoke_callback(cpu, st->state, true, NULL);
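/*
 * Illustrative sketch (not part of this file, compiled out) of where an
 * architecture's secondary startup path calls notify_cpu_starting():
 * with interrupts still disabled, before marking the CPU online. The
 * function name and the surrounding steps are hypothetical and arch
 * specific.
 */
#if 0
void secondary_start_kernel_example(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... low level per-CPU setup: MMU, percpu offsets, timers ... */

	notify_cpu_starting(cpu);	/* run the STARTING callbacks */
	set_cpu_online(cpu, true);	/* now visible as online to the BP */

	local_irq_enable();
	/* Enters the idle loop and reports CPUHP_AP_ONLINE_IDLE */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#endif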
1068 * Called from the idle task. Wake up the controlling task which brings the
1069 * stopper and the hotplug thread of the upcoming CPU up and then delegates
1070 * the rest of the online bringup to the hotplug thread.
1072 void cpuhp_online_idle(enum cpuhp_state state)
1074 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1076 /* Happens for the boot cpu */
1077 if (state != CPUHP_AP_ONLINE_IDLE)
1080 st->state = CPUHP_AP_ONLINE_IDLE;
1081 complete(&st->done);
1084 /* Requires cpu_add_remove_lock to be held */
1085 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1087 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1088 struct task_struct *idle;
1091 cpu_hotplug_begin();
1093 if (!cpu_present(cpu)) {
1099 * The caller of do_cpu_up might have raced with another
1100 * caller. Ignore it for now.
1102 if (st->state >= target)
1105 if (st->state == CPUHP_OFFLINE) {
1106 /* Let it fail before we try to bring the cpu up */
1107 idle = idle_thread_get(cpu);
1109 ret = PTR_ERR(idle);
1114 cpuhp_tasks_frozen = tasks_frozen;
1116 st->target = target;
1118 * If the current CPU state is in the range of the AP hotplug thread,
1119 * then we need to kick the thread once more.
1121 if (st->state > CPUHP_BRINGUP_CPU) {
1122 ret = cpuhp_kick_ap_work(cpu);
1124 * The AP side has done the error rollback already. Just
1125 * return the error code..
1132 * Try to reach the target state. We max out on the BP at
1133 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1134 * responsible for bringing it up to the target state.
1136 target = min((int)target, CPUHP_BRINGUP_CPU);
1137 ret = cpuhp_up_callbacks(cpu, st, target);
1143 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1147 if (!cpu_possible(cpu)) {
1148 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1150 #if defined(CONFIG_IA64)
1151 pr_err("please check additional_cpus= boot parameter\n");
1156 err = try_online_node(cpu_to_node(cpu));
1160 cpu_maps_update_begin();
1162 if (cpu_hotplug_disabled) {
1166 if (!cpu_smt_allowed(cpu)) {
1171 err = _cpu_up(cpu, 0, target);
1173 cpu_maps_update_done();
1177 int cpu_up(unsigned int cpu)
1179 return do_cpu_up(cpu, CPUHP_ONLINE);
1181 EXPORT_SYMBOL_GPL(cpu_up);
1183 #ifdef CONFIG_PM_SLEEP_SMP
1184 static cpumask_var_t frozen_cpus;
1186 int freeze_secondary_cpus(int primary)
1190 cpu_maps_update_begin();
1191 if (!cpu_online(primary))
1192 primary = cpumask_first(cpu_online_mask);
1194 * We take down all of the non-boot CPUs in one shot to avoid races
1195 * with userspace trying to use CPU hotplug at the same time
1197 cpumask_clear(frozen_cpus);
1199 pr_info("Disabling non-boot CPUs ...\n");
1200 for_each_online_cpu(cpu) {
1203 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1204 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1205 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1207 cpumask_set_cpu(cpu, frozen_cpus);
1209 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1215 BUG_ON(num_online_cpus() > 1);
1217 pr_err("Non-boot CPUs are not disabled\n");
1220 * Make sure the CPUs won't be enabled by someone else. We need to do
1221 * this even in case of failure as all disable_nonboot_cpus() users are
1222 * supposed to do enable_nonboot_cpus() on the failure path.
1224 cpu_hotplug_disabled++;
1226 cpu_maps_update_done();
1230 void __weak arch_enable_nonboot_cpus_begin(void)
1234 void __weak arch_enable_nonboot_cpus_end(void)
1238 void enable_nonboot_cpus(void)
1242 /* Allow everyone to use the CPU hotplug again */
1243 cpu_maps_update_begin();
1244 __cpu_hotplug_enable();
1245 if (cpumask_empty(frozen_cpus))
1248 pr_info("Enabling non-boot CPUs ...\n");
1250 arch_enable_nonboot_cpus_begin();
1252 for_each_cpu(cpu, frozen_cpus) {
1253 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1254 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1255 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1257 pr_info("CPU%d is up\n", cpu);
1260 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1263 arch_enable_nonboot_cpus_end();
1265 cpumask_clear(frozen_cpus);
1267 cpu_maps_update_done();
1270 static int __init alloc_frozen_cpus(void)
1272 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1276 core_initcall(alloc_frozen_cpus);
1279 * When callbacks for CPU hotplug notifications are being executed, we must
1280 * ensure that the state of the system with respect to the tasks being frozen
1281 * or not, as reported by the notification, remains unchanged *throughout the
1282 * duration* of the execution of the callbacks.
1283 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1285 * This synchronization is implemented by mutually excluding regular CPU
1286 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1287 * Hibernate notifications.
1290 cpu_hotplug_pm_callback(struct notifier_block *nb,
1291 unsigned long action, void *ptr)
1295 case PM_SUSPEND_PREPARE:
1296 case PM_HIBERNATION_PREPARE:
1297 cpu_hotplug_disable();
1300 case PM_POST_SUSPEND:
1301 case PM_POST_HIBERNATION:
1302 cpu_hotplug_enable();
1313 static int __init cpu_hotplug_pm_sync_init(void)
1316 * cpu_hotplug_pm_callback has higher priority than x86
1317 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1318 * to disable cpu hotplug to avoid cpu hotplug race.
1320 pm_notifier(cpu_hotplug_pm_callback, 0);
1323 core_initcall(cpu_hotplug_pm_sync_init);
1325 #endif /* CONFIG_PM_SLEEP_SMP */
1327 #endif /* CONFIG_SMP */
1329 /* Boot processor state steps */
1330 static struct cpuhp_step cpuhp_bp_states[] = {
1333 .startup.single = NULL,
1334 .teardown.single = NULL,
1337 [CPUHP_CREATE_THREADS]= {
1338 .name = "threads:prepare",
1339 .startup.single = smpboot_create_threads,
1340 .teardown.single = NULL,
1343 [CPUHP_PERF_PREPARE] = {
1344 .name = "perf:prepare",
1345 .startup.single = perf_event_init_cpu,
1346 .teardown.single = perf_event_exit_cpu,
1348 [CPUHP_WORKQUEUE_PREP] = {
1349 .name = "workqueue:prepare",
1350 .startup.single = workqueue_prepare_cpu,
1351 .teardown.single = NULL,
1353 [CPUHP_HRTIMERS_PREPARE] = {
1354 .name = "hrtimers:prepare",
1355 .startup.single = hrtimers_prepare_cpu,
1356 .teardown.single = hrtimers_dead_cpu,
1358 [CPUHP_SMPCFD_PREPARE] = {
1359 .name = "smpcfd:prepare",
1360 .startup.single = smpcfd_prepare_cpu,
1361 .teardown.single = smpcfd_dead_cpu,
1363 [CPUHP_RELAY_PREPARE] = {
1364 .name = "relay:prepare",
1365 .startup.single = relay_prepare_cpu,
1366 .teardown.single = NULL,
1368 [CPUHP_SLAB_PREPARE] = {
1369 .name = "slab:prepare",
1370 .startup.single = slab_prepare_cpu,
1371 .teardown.single = slab_dead_cpu,
1373 [CPUHP_RCUTREE_PREP] = {
1374 .name = "RCU/tree:prepare",
1375 .startup.single = rcutree_prepare_cpu,
1376 .teardown.single = rcutree_dead_cpu,
1379 * Preparatory and dead notifiers. Will be replaced once the notifiers
1380 * are converted to states.
1382 [CPUHP_NOTIFY_PREPARE] = {
1383 .name = "notify:prepare",
1384 .startup.single = notify_prepare,
1385 .teardown.single = notify_dead,
1390 * On the tear-down path, timers_dead_cpu() must be invoked
1391 * before blk_mq_queue_reinit_notify() from notify_dead(),
1392 * otherwise an RCU stall occurs.
1394 [CPUHP_TIMERS_PREPARE] = {
1395 .name = "timers:dead",
1396 .startup.single = timers_prepare_cpu,
1397 .teardown.single = timers_dead_cpu,
1399 /* Kicks the plugged cpu into life */
1400 [CPUHP_BRINGUP_CPU] = {
1401 .name = "cpu:bringup",
1402 .startup.single = bringup_cpu,
1403 .teardown.single = NULL,
1407 * Handled on the control processor until the plugged processor manages this itself.
1410 [CPUHP_TEARDOWN_CPU] = {
1411 .name = "cpu:teardown",
1412 .startup.single = NULL,
1413 .teardown.single = takedown_cpu,
1417 [CPUHP_BRINGUP_CPU] = { },
1421 /* Application processor state steps */
1422 static struct cpuhp_step cpuhp_ap_states[] = {
1424 /* Final state before CPU kills itself */
1425 [CPUHP_AP_IDLE_DEAD] = {
1426 .name = "idle:dead",
1429 * Last state before CPU enters the idle loop to die. Transient state
1430 * for synchronization.
1432 [CPUHP_AP_OFFLINE] = {
1433 .name = "ap:offline",
1436 /* First state is scheduler control. Interrupts are disabled */
1437 [CPUHP_AP_SCHED_STARTING] = {
1438 .name = "sched:starting",
1439 .startup.single = sched_cpu_starting,
1440 .teardown.single = sched_cpu_dying,
1442 [CPUHP_AP_RCUTREE_DYING] = {
1443 .name = "RCU/tree:dying",
1444 .startup.single = NULL,
1445 .teardown.single = rcutree_dying_cpu,
1447 [CPUHP_AP_SMPCFD_DYING] = {
1448 .name = "smpcfd:dying",
1449 .startup.single = NULL,
1450 .teardown.single = smpcfd_dying_cpu,
1452 /* Entry state on starting. Interrupts enabled from here on. Transient
1453 * state for synchronization */
1454 [CPUHP_AP_ONLINE] = {
1455 .name = "ap:online",
1457 /* Handle smpboot threads park/unpark */
1458 [CPUHP_AP_SMPBOOT_THREADS] = {
1459 .name = "smpboot/threads:online",
1460 .startup.single = smpboot_unpark_threads,
1461 .teardown.single = smpboot_park_threads,
1463 [CPUHP_AP_PERF_ONLINE] = {
1464 .name = "perf:online",
1465 .startup.single = perf_event_init_cpu,
1466 .teardown.single = perf_event_exit_cpu,
1468 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1469 .name = "workqueue:online",
1470 .startup.single = workqueue_online_cpu,
1471 .teardown.single = workqueue_offline_cpu,
1473 [CPUHP_AP_RCUTREE_ONLINE] = {
1474 .name = "RCU/tree:online",
1475 .startup.single = rcutree_online_cpu,
1476 .teardown.single = rcutree_offline_cpu,
1480 * Online/down_prepare notifiers. Will be removed once the notifiers
1481 * are converted to states.
1483 [CPUHP_AP_NOTIFY_ONLINE] = {
1484 .name = "notify:online",
1485 .startup.single = notify_online,
1486 .teardown.single = notify_down_prepare,
1491 * The dynamically registered state space is here
1495 /* Last state is scheduler control setting the cpu active */
1496 [CPUHP_AP_ACTIVE] = {
1497 .name = "sched:active",
1498 .startup.single = sched_cpu_activate,
1499 .teardown.single = sched_cpu_deactivate,
1503 /* CPU is fully up and running. */
1506 .startup.single = NULL,
1507 .teardown.single = NULL,
1511 /* Sanity check for callbacks */
1512 static int cpuhp_cb_check(enum cpuhp_state state)
1514 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1519 static void cpuhp_store_callbacks(enum cpuhp_state state,
1521 int (*startup)(unsigned int cpu),
1522 int (*teardown)(unsigned int cpu),
1523 bool multi_instance)
1525 /* (Un)Install the callbacks for further cpu hotplug operations */
1526 struct cpuhp_step *sp;
1528 sp = cpuhp_get_step(state);
1529 sp->startup.single = startup;
1530 sp->teardown.single = teardown;
1532 sp->multi_instance = multi_instance;
1533 INIT_HLIST_HEAD(&sp->list);
1536 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1538 return cpuhp_get_step(state)->teardown.single;
1542 * Call the startup/teardown function for a step either on the AP or
1543 * on the current CPU.
1545 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1546 struct hlist_node *node)
1548 struct cpuhp_step *sp = cpuhp_get_step(state);
1551 if ((bringup && !sp->startup.single) ||
1552 (!bringup && !sp->teardown.single))
1555 * The non-AP-bound callbacks can fail on bringup. On teardown,
1556 * e.g. module removal, we crash for now.
1559 if (cpuhp_is_ap_state(state))
1560 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1562 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1564 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1566 BUG_ON(ret && !bringup);
1571 * Called from __cpuhp_setup_state on a recoverable failure.
1573 * Note: The teardown callbacks for rollback are not allowed to fail!
1575 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1576 struct hlist_node *node)
1580 /* Roll back the already executed steps on the other cpus */
1581 for_each_present_cpu(cpu) {
1582 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1583 int cpustate = st->state;
1585 if (cpu >= failedcpu)
1588 /* Did we invoke the startup call on that cpu ? */
1589 if (cpustate >= state)
1590 cpuhp_issue_call(cpu, state, false, node);
1595 * Returns a free slot for dynamic assignment in the online state space. The
1596 * states are protected by the cpuhp_state_mutex and an empty slot is
1597 * identified by having no name assigned.
1599 static int cpuhp_reserve_state(enum cpuhp_state state)
1603 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1604 if (cpuhp_ap_states[i].name)
1607 cpuhp_ap_states[i].name = "Reserved";
1610 WARN(1, "No more dynamic states available for CPU hotplug\n");
1614 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1617 struct cpuhp_step *sp;
1621 sp = cpuhp_get_step(state);
1622 if (sp->multi_instance == false)
1626 mutex_lock(&cpuhp_state_mutex);
1628 if (!invoke || !sp->startup.multi)
1632 * Try to call the startup callback for each present cpu
1633 * depending on the hotplug state of the cpu.
1635 for_each_present_cpu(cpu) {
1636 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1637 int cpustate = st->state;
1639 if (cpustate < state)
1642 ret = cpuhp_issue_call(cpu, state, true, node);
1644 if (sp->teardown.multi)
1645 cpuhp_rollback_install(cpu, state, node);
1651 hlist_add_head(node, &sp->list);
1654 mutex_unlock(&cpuhp_state_mutex);
1658 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
1661 * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1662 * @state: The state to setup
1663 * @invoke: If true, the startup function is invoked for cpus where
1664 * cpu state >= @state
1665 * @startup: startup callback function
1666 * @teardown: teardown callback function
1668 * Returns 0 on success, the allocated state for CPUHP_AP_ONLINE_DYN, otherwise a negative error code
1670 int __cpuhp_setup_state(enum cpuhp_state state,
1671 const char *name, bool invoke,
1672 int (*startup)(unsigned int cpu),
1673 int (*teardown)(unsigned int cpu),
1674 bool multi_instance)
1679 if (cpuhp_cb_check(state) || !name)
1683 mutex_lock(&cpuhp_state_mutex);
1685 /* Currently, dynamic assignments are only possible for the ONLINE state */
1686 if (state == CPUHP_AP_ONLINE_DYN) {
1688 ret = cpuhp_reserve_state(state);
1694 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1696 if (!invoke || !startup)
1700 * Try to call the startup callback for each present cpu
1701 * depending on the hotplug state of the cpu.
1703 for_each_present_cpu(cpu) {
1704 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1705 int cpustate = st->state;
1707 if (cpustate < state)
1710 ret = cpuhp_issue_call(cpu, state, true, NULL);
1713 cpuhp_rollback_install(cpu, state, NULL);
1714 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1719 mutex_unlock(&cpuhp_state_mutex);
1722 if (!ret && dyn_state)
1726 EXPORT_SYMBOL(__cpuhp_setup_state);
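/*
 * Illustrative sketch (not part of this file, compiled out): registering a
 * dynamic online state through the cpuhp_setup_state() wrapper declared in
 * <linux/cpuhotplug.h>, which calls __cpuhp_setup_state() with invoke=true.
 * The "mydrv" names and callbacks are hypothetical; for CPUHP_AP_ONLINE_DYN
 * the allocated state number is returned on success.
 */
#if 0
static enum cpuhp_state mydrv_online_state;

static int mydrv_cpu_online(unsigned int cpu)
{
	/* Per-CPU setup; a failure here triggers a rollback */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	/* Undo the per-CPU setup; must not fail */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_online_state = ret;
	return 0;
}
#endif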
1728 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1729 struct hlist_node *node, bool invoke)
1731 struct cpuhp_step *sp = cpuhp_get_step(state);
1734 BUG_ON(cpuhp_cb_check(state));
1736 if (!sp->multi_instance)
1740 mutex_lock(&cpuhp_state_mutex);
1742 if (!invoke || !cpuhp_get_teardown_cb(state))
1745 * Call the teardown callback for each present cpu depending
1746 * on the hotplug state of the cpu. This function is not
1747 * allowed to fail currently!
1749 for_each_present_cpu(cpu) {
1750 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1751 int cpustate = st->state;
1753 if (cpustate >= state)
1754 cpuhp_issue_call(cpu, state, false, node);
1759 mutex_unlock(&cpuhp_state_mutex);
1764 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1766 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1767 * @state: The state to remove
1768 * @invoke: If true, the teardown function is invoked for cpus where
1769 * cpu state >= @state
1771 * The teardown callback is currently not allowed to fail. Think
1772 * about module removal!
1774 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1776 struct cpuhp_step *sp = cpuhp_get_step(state);
1779 BUG_ON(cpuhp_cb_check(state));
1782 mutex_lock(&cpuhp_state_mutex);
1784 if (sp->multi_instance) {
1785 WARN(!hlist_empty(&sp->list),
1786 "Error: Removing state %d which has instances left.\n",
1791 if (!invoke || !cpuhp_get_teardown_cb(state))
1795 * Call the teardown callback for each present cpu depending
1796 * on the hotplug state of the cpu. This function is not
1797 * allowed to fail currently!
1799 for_each_present_cpu(cpu) {
1800 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1801 int cpustate = st->state;
1803 if (cpustate >= state)
1804 cpuhp_issue_call(cpu, state, false, NULL);
1807 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1808 mutex_unlock(&cpuhp_state_mutex);
1811 EXPORT_SYMBOL(__cpuhp_remove_state);
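/*
 * Illustrative sketch (not part of this file, compiled out), continuing the
 * hypothetical "mydrv" example above: on module removal the dynamic state is
 * torn down with the cpuhp_remove_state() wrapper, which invokes the teardown
 * callback on each CPU that has reached the state before releasing the slot.
 */
#if 0
static void __exit mydrv_exit(void)
{
	cpuhp_remove_state(mydrv_online_state);
}
#endif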
1813 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1814 static ssize_t show_cpuhp_state(struct device *dev,
1815 struct device_attribute *attr, char *buf)
1817 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1819 return sprintf(buf, "%d\n", st->state);
1821 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1823 static ssize_t write_cpuhp_target(struct device *dev,
1824 struct device_attribute *attr,
1825 const char *buf, size_t count)
1827 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1828 struct cpuhp_step *sp;
1831 ret = kstrtoint(buf, 10, &target);
1835 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1836 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1839 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1843 ret = lock_device_hotplug_sysfs();
1847 mutex_lock(&cpuhp_state_mutex);
1848 sp = cpuhp_get_step(target);
1849 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1850 mutex_unlock(&cpuhp_state_mutex);
1854 if (st->state < target)
1855 ret = do_cpu_up(dev->id, target);
1857 ret = do_cpu_down(dev->id, target);
1859 unlock_device_hotplug();
1860 return ret ? ret : count;
1863 static ssize_t show_cpuhp_target(struct device *dev,
1864 struct device_attribute *attr, char *buf)
1866 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1868 return sprintf(buf, "%d\n", st->target);
1870 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1872 static struct attribute *cpuhp_cpu_attrs[] = {
1873 &dev_attr_state.attr,
1874 &dev_attr_target.attr,
1878 static struct attribute_group cpuhp_cpu_attr_group = {
1879 .attrs = cpuhp_cpu_attrs,
1884 static ssize_t show_cpuhp_states(struct device *dev,
1885 struct device_attribute *attr, char *buf)
1887 ssize_t cur, res = 0;
1890 mutex_lock(&cpuhp_state_mutex);
1891 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1892 struct cpuhp_step *sp = cpuhp_get_step(i);
1895 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1900 mutex_unlock(&cpuhp_state_mutex);
1903 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1905 static struct attribute *cpuhp_cpu_root_attrs[] = {
1906 &dev_attr_states.attr,
1910 static struct attribute_group cpuhp_cpu_root_attr_group = {
1911 .attrs = cpuhp_cpu_root_attrs,
1916 #ifdef CONFIG_HOTPLUG_SMT
1918 static const char *smt_states[] = {
1919 [CPU_SMT_ENABLED] = "on",
1920 [CPU_SMT_DISABLED] = "off",
1921 [CPU_SMT_FORCE_DISABLED] = "forceoff",
1922 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
1926 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1928 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
1931 static void cpuhp_offline_cpu_device(unsigned int cpu)
1933 struct device *dev = get_cpu_device(cpu);
1935 dev->offline = true;
1936 /* Tell user space about the state change */
1937 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
1940 static void cpuhp_online_cpu_device(unsigned int cpu)
1942 struct device *dev = get_cpu_device(cpu);
1944 dev->offline = false;
1945 /* Tell user space about the state change */
1946 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
1949 static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
1953 cpu_maps_update_begin();
1954 for_each_online_cpu(cpu) {
1955 if (topology_is_primary_thread(cpu))
1957 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1961 * As this needs to hold the cpu maps lock it's impossible
1962 * to call device_offline() because that ends up calling
1963 * cpu_down() which takes cpu maps lock. cpu maps lock
1964 * needs to be held as this might race against in-kernel
1965 * abusers of the hotplug machinery (thermal management).
1967 * So nothing would update device:offline state. That would
1968 * leave the sysfs entry stale and prevent onlining after
1969 * smt control has been changed to 'off' again. This is
1970 * called under the sysfs hotplug lock, so it is properly
1971 * serialized against the regular offline usage.
1973 cpuhp_offline_cpu_device(cpu);
1976 cpu_smt_control = ctrlval;
1977 cpu_maps_update_done();
1981 static int cpuhp_smt_enable(void)
1985 cpu_maps_update_begin();
1986 cpu_smt_control = CPU_SMT_ENABLED;
1987 for_each_present_cpu(cpu) {
1988 /* Skip online CPUs and CPUs on offline nodes */
1989 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
1991 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
1994 /* See comment in cpuhp_smt_disable() */
1995 cpuhp_online_cpu_device(cpu);
1997 cpu_maps_update_done();
2002 store_smt_control(struct device *dev, struct device_attribute *attr,
2003 const char *buf, size_t count)
2007 if (sysfs_streq(buf, "on"))
2008 ctrlval = CPU_SMT_ENABLED;
2009 else if (sysfs_streq(buf, "off"))
2010 ctrlval = CPU_SMT_DISABLED;
2011 else if (sysfs_streq(buf, "forceoff"))
2012 ctrlval = CPU_SMT_FORCE_DISABLED;
2016 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2019 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2022 ret = lock_device_hotplug_sysfs();
2026 if (ctrlval != cpu_smt_control) {
2028 case CPU_SMT_ENABLED:
2029 ret = cpuhp_smt_enable();
2031 case CPU_SMT_DISABLED:
2032 case CPU_SMT_FORCE_DISABLED:
2033 ret = cpuhp_smt_disable(ctrlval);
2038 unlock_device_hotplug();
2039 return ret ? ret : count;
2041 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2044 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2046 bool active = topology_max_smt_threads() > 1;
2048 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2050 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2052 static struct attribute *cpuhp_smt_attrs[] = {
2053 &dev_attr_control.attr,
2054 &dev_attr_active.attr,
2058 static const struct attribute_group cpuhp_smt_attr_group = {
2059 .attrs = cpuhp_smt_attrs,
2064 static int __init cpu_smt_state_init(void)
2067 * If SMT was disabled by BIOS, detect it here, after the CPUs have
2068 * been brought online. This ensures the smt/l1tf sysfs entries are
2069 * consistent with reality. Note this may overwrite cpu_smt_control's previous setting.
2072 if (topology_max_smt_threads() == 1)
2073 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
2075 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2076 &cpuhp_smt_attr_group);
2080 static inline int cpu_smt_state_init(void) { return 0; }
2083 static int __init cpuhp_sysfs_init(void)
2087 ret = cpu_smt_state_init();
2091 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2092 &cpuhp_cpu_root_attr_group);
2096 for_each_possible_cpu(cpu) {
2097 struct device *dev = get_cpu_device(cpu);
2101 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2107 device_initcall(cpuhp_sysfs_init);
2111 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2112 * represents, for every bit number nr, the NR_CPUS-bit binary value of 1<<nr.
2114 * It is used by cpumask_of() to get a constant address to a CPU
2115 * mask value that has a single bit set only.
2118 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2119 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2120 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2121 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2122 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2124 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2126 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2127 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2128 #if BITS_PER_LONG > 32
2129 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2130 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2133 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
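/*
 * Illustrative sketch (compiled out): roughly how cpumask_of() consumes the
 * table above; the real helper lives in <linux/cpumask.h>, so this is only a
 * restatement of the indexing trick. Row 1 + cpu % BITS_PER_LONG holds the
 * single word 1UL << (cpu % BITS_PER_LONG); stepping the pointer back by
 * cpu / BITS_PER_LONG words lets the rows overlap so that this word lands at
 * the right offset in an NR_CPUS-bit mask while every other word reads as
 * zero.
 */
#if 0
static inline const struct cpumask *get_cpu_mask_example(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif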
2135 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2136 EXPORT_SYMBOL(cpu_all_bits);
2138 #ifdef CONFIG_INIT_ALL_POSSIBLE
2139 struct cpumask __cpu_possible_mask __read_mostly
2142 struct cpumask __cpu_possible_mask __read_mostly;
2144 EXPORT_SYMBOL(__cpu_possible_mask);
2146 struct cpumask __cpu_online_mask __read_mostly;
2147 EXPORT_SYMBOL(__cpu_online_mask);
2149 struct cpumask __cpu_present_mask __read_mostly;
2150 EXPORT_SYMBOL(__cpu_present_mask);
2152 struct cpumask __cpu_active_mask __read_mostly;
2153 EXPORT_SYMBOL(__cpu_active_mask);
2155 void init_cpu_present(const struct cpumask *src)
2157 cpumask_copy(&__cpu_present_mask, src);
2160 void init_cpu_possible(const struct cpumask *src)
2162 cpumask_copy(&__cpu_possible_mask, src);
2165 void init_cpu_online(const struct cpumask *src)
2167 cpumask_copy(&__cpu_online_mask, src);
2171 * Activate the first processor.
2173 void __init boot_cpu_init(void)
2175 int cpu = smp_processor_id();
2177 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2178 set_cpu_online(cpu, true);
2179 set_cpu_active(cpu, true);
2180 set_cpu_present(cpu, true);
2181 set_cpu_possible(cpu, true);
2185 * Must be called _AFTER_ setting up the per_cpu areas
2187 void __init boot_cpu_hotplug_init(void)
2189 this_cpu_write(cpuhp_state.booted_once, true);
2190 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);