/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;
static bool softlockup_threads_initialized __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
/**
 * watchdog_nmi_reconfigure - Optional function to reconfigure NMI watchdogs
 * @run:	If false stop the watchdogs on all enabled CPUs
 *		If true start the watchdogs on all enabled CPUs
 *
 * The core call order is:
 * watchdog_nmi_reconfigure(false);
 * update_variables();
 * watchdog_nmi_reconfigure(true);
 *
 * The second call which starts the watchdogs again guarantees that the
 * following variables are stable across the call:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 *
 * After the call the variables can be changed again.
 */
void __weak watchdog_nmi_reconfigure(bool run) { }
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
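/*
 * Example: with the default watchdog_thresh of 10, a hard lockup is
 * reported after roughly 10 seconds, while get_softlockup_thresh()
 * returns 20, so a soft lockup is only reported once the watchdog task
 * has been starved for about 20 seconds.
 */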
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
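/*
 * Worked arithmetic for get_timestamp() above: running_clock() returns
 * nanoseconds, and 2^30 = 1073741824 is within about 7% of 10^9, so the
 * right shift is a cheap approximation of a divide by NSEC_PER_SEC.
 * That error is irrelevant for lockup threshold comparisons.
 */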
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
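/*
 * Example: with the default watchdog_thresh = 10, get_softlockup_thresh()
 * is 20 and sample_period becomes 20s / 5 = 4s, i.e. the hrtimer fires
 * (and the watchdog task is woken) five times per soft lockup window.
 */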
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
/* watchdog detector functions */
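/*
 * is_hardlockup() below is meant to be called from the hardlockup
 * detector's NMI/perf event handler: if the per-CPU hrtimer interrupt
 * count has not advanced since the previous sample, timer interrupts
 * have been blocked for a full sample window, which is treated as a
 * hard lockup.
 */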
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/*
	 * Check for a softlockup. This is done by making sure a high
	 * priority task is being scheduled. The task touches the watchdog
	 * to indicate it is getting cpu time. If it hasn't, then this is a
	 * good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup. Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;
		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}
		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}
		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}
		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
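/*
 * The smpboot infrastructure below provides one "watchdog/%u" kthread
 * per CPU in watchdog_allowed_mask. The smpboot core polls
 * thread_should_run() and invokes thread_fn() when it returns true;
 * setup/unpark and park/cleanup run as CPUs come online, go offline,
 * or as the allowed cpumask is updated.
 */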
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
static void softlockup_update_smpboot_threads(void)
{
	lockdep_assert_held(&watchdog_mutex);

	if (!softlockup_threads_initialized)
		return;

	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
	__lockup_detector_cleanup();
}
/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
	cpumask_clear(&watchdog_allowed_mask);
	softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}
static void softlockup_reconfigure_threads(void)
{
	watchdog_nmi_reconfigure(false);
	softlockup_park_all_threads();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_reconfigure(true);
}
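/*
 * Note the ordering above: it is exactly the stop -> update variables ->
 * start sequence documented at watchdog_nmi_reconfigure(), so
 * watchdog_enabled, watchdog_thresh and the cpumasks are stable by the
 * time the watchdogs are started again.
 */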
/*
 * Create the watchdog thread infrastructure.
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void softlockup_init_threads(void)
{
	int ret;

	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_allowed_mask);
	if (ret) {
		pr_err("Failed to initialize soft lockup detector threads\n");
		return;
	}

	mutex_lock(&watchdog_mutex);
	softlockup_threads_initialized = true;
	softlockup_reconfigure_threads();
	mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static inline void softlockup_init_threads(void) { }
static void softlockup_reconfigure_threads(void)
{
	watchdog_nmi_reconfigure(false);
	lockup_detector_update_enable();
	watchdog_nmi_reconfigure(true);
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	softlockup_reconfigure_threads();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameters
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
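/*
 * Illustrative usage of the sysctl interface above (exact value formats
 * are described by the running kernel's sysctl documentation):
 *
 *   echo 0   > /proc/sys/kernel/watchdog         # disable both detectors
 *   echo 0   > /proc/sys/kernel/nmi_watchdog     # hardlockup detector only
 *   echo 20  > /proc/sys/kernel/watchdog_thresh  # hard ~20s, soft ~40s
 *   echo 0-3 > /proc/sys/kernel/watchdog_cpumask # restrict to cpus 0-3
 */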
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	softlockup_init_threads();
}