/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>
#include <trace/events/sched.h>
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
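/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * registers a hotplug notifier and must also initialize state for CPUs that
 * are already online uses cpu_notifier_register_begin/done() around
 * __register_cpu_notifier(), so no CPU can come online between the two
 * steps. All "example_" names below are hypothetical.
 */
#if 0
static int example_cb(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	return NOTIFY_OK;
}
static struct notifier_block example_nb = { .notifier_call = example_cb };

static int __init example_register(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		/* set up per-cpu state for CPUs that are already up */
	}
	__register_cpu_notifier(&example_nb);
	cpu_notifier_register_done();
	return 0;
}
#endif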
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void cpu_hotplug_mutex_held(void)
{
	lockdep_assert_held(&cpu_hotplug.lock);
}
EXPORT_SYMBOL(cpu_hotplug_mutex_held);
void get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
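/*
 * Usage sketch (illustrative, not part of the original file): read-side code
 * that needs cpu_online_mask to stay stable brackets the region with
 * get_online_cpus()/put_online_cpus(). The function name is hypothetical.
 */
#if 0
static void example_walk_online_cpus(void)
{
	int cpu;

	get_online_cpus();	/* holds off cpu_hotplug_begin() writers */
	for_each_online_cpu(cpu) {
		/* per-cpu work that relies on 'cpu' staying online */
	}
	put_online_cpus();	/* may wake a waiting hotplug writer */
}
#endif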
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
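/*
 * Usage sketch (illustrative, not part of the original file): code that
 * cannot tolerate CPUs coming or going for a while brackets the window with
 * cpu_hotplug_disable()/cpu_hotplug_enable(); cpu_up() and cpu_down() return
 * -EBUSY in between. The function name is hypothetical.
 */
#if 0
static void example_hotplug_free_window(void)
{
	cpu_hotplug_disable();	/* also waits for in-flight hotplug to finish */
	/* ... work that must not race with CPU hotplug ... */
	cpu_hotplug_enable();
}
#endif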
#endif	/* CONFIG_HOTPLUG_CPU */
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);
	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
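/*
 * Usage sketch (illustrative, not part of the original file): a typical CPU
 * hotplug notifier reacts to a few actions and ignores the rest. All
 * "example_" names are hypothetical; register_cpu_notifier() would be called
 * from the owner's init path.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu resources for 'cpu'; may fail */
		break;
	case CPU_ONLINE:
		/* 'cpu' is now running and accepting work */
		break;
	case CPU_DEAD:
		/* release per-cpu resources for 'cpu' */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};
#endif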
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();

		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;
	if (!cpu_online(cpu))
		return -EINVAL;
	if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1)
		return -EBUSY;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone. Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_down(cpu, 0);
out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
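/*
 * Usage sketch (illustrative, not part of the original file): kernel code
 * offlines a CPU and later brings it back with the cpu_down()/cpu_up() pair
 * defined in this file; both return -EBUSY while hotplug is disabled. The
 * function name is hypothetical.
 */
#if 0
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = cpu_down(cpu);
	if (ret)
		return ret;
	return cpu_up(cpu);
}
#endif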
#endif /*CONFIG_HOTPLUG_CPU*/
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn_ratelimited("%s: attempt to bring up CPU %u failed\n",
				    __func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, ret, 1);
	return ret;
}
static int switch_to_rt_policy(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	unsigned int policy = current->policy;
	int err;

	/* Nobody should be attempting hotplug from these policy contexts. */
	if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
	    policy == SCHED_DEADLINE)
		return -EPERM;

	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;

	/* Only SCHED_NORMAL left. */
	err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	return err;
}

static int switch_to_fair_policy(void)
{
	struct sched_param param = { .sched_priority = 0 };

	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
}
int cpu_up(unsigned int cpu)
{
	int err = 0;
	int switch_err = 0;

	switch_err = switch_to_rt_policy();
	if (switch_err < 0)
		return switch_err;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	err = _cpu_up(cpu, 0);
out:
	cpu_maps_update_done();

	switch_err = switch_to_fair_policy();
	if (switch_err)
		pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
		       switch_err, current->comm, current->pid);

	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
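/*
 * Illustrative sketch (not part of the original file) of where an
 * architecture is expected to call notify_cpu_starting(): from the secondary
 * CPU's early bringup path, with interrupts still disabled and before the
 * boot CPU returns from __cpu_up(). The function name below is hypothetical;
 * real arch code (e.g. ARM's secondary_start_kernel()) follows this shape.
 */
#if 0
static void example_secondary_start(unsigned int cpu)
{
	/* low-level per-cpu setup runs first, interrupts still off */
	notify_cpu_starting(cpu);	/* run the CPU_STARTING notifiers */
	set_cpu_online(cpu, true);	/* lets __cpu_up() on the boot CPU proceed */
	local_irq_enable();
	/* ... fall into the idle loop ... */
}
#endif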
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
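/*
 * Illustrative sketch (not part of the original file): this is essentially
 * how cpumask_of()/get_cpu_mask() in <linux/cpumask.h> index the table above.
 * Row (cpu % BITS_PER_LONG) + 1 has bit (cpu % BITS_PER_LONG) set in its
 * first word, and backing the pointer up by (cpu / BITS_PER_LONG) longs
 * places that word at the correct offset within the returned mask.
 */
#if 0
static const struct cpumask *example_cpumask_of(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif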
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

static DECLARE_BITMAP(cpu_isolated_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_isolated_mask = to_cpumask(cpu_isolated_bits);
EXPORT_SYMBOL(cpu_isolated_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void set_cpu_isolated(unsigned int cpu, bool isolated)
{
	if (isolated)
		cpumask_set_cpu(cpu, to_cpumask(cpu_isolated_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_isolated_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}

void init_cpu_isolated(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_isolated_bits), src);
}
enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
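/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * wants to react to idle transitions registers an atomic notifier here. The
 * callback runs from the idle path, so it must not sleep, and 'val' is
 * whatever the platform's idle loop passes to idle_notifier_call_chain().
 * The "example_" names are hypothetical.
 */
#if 0
static int example_idle_callback(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	/* react to the idle transition encoded in 'val'; no sleeping here */
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_callback,
};
/* registered with idle_notifier_register(&example_idle_nb); */
#endif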