/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

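/*
 * Illustrative sketch, not part of this file: a hypervisor guest's early
 * arch setup code could suppress the hard lockup detector before the
 * kernel command line is parsed. Both names below are hypothetical:
 *
 *	static void __init example_guest_init(void)
 *	{
 *		if (example_running_on_hypervisor())
 *			hardlockup_detector_disable();
 *	}
 */
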
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#endif
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 * - sysctl_hardlockup_all_cpu_backtrace
 * - hardlockup_panic
 */
void __weak watchdog_nmi_reconfigure(void) { }

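/*
 * Sketch of an architecture override, assuming an arch-specific NMI
 * watchdog; the example_arch_* helpers are hypothetical. Non-weak
 * definitions like these replace the weak stubs above at link time:
 *
 *	int watchdog_nmi_enable(unsigned int cpu)
 *	{
 *		return example_arch_nmi_start(cpu, watchdog_thresh);
 *	}
 *
 *	void watchdog_nmi_disable(unsigned int cpu)
 *	{
 *		example_arch_nmi_stop(cpu);
 *	}
 */
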
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
int __read_mostly soft_watchdog_enabled;

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
			!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
			!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

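/*
 * With the default watchdog_thresh of 10, the hard lockup threshold is
 * 10 seconds and the soft lockup threshold computed here is 20 seconds.
 */
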
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

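/*
 * Worked example with the default watchdog_thresh of 10: the soft
 * threshold is 20 s, so sample_period = 20 * (NSEC_PER_SEC / 5) = 4 s,
 * i.e. the hrtimer fires five times per soft lockup window.
 */
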
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless. Do we care if a 0 races with a timestamp?
	 * All it means is that the softlockup check starts one cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

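/*
 * Example: with the defaults, if this CPU's watchdog task last touched
 * its timestamp 25 seconds ago, is_softlockup() returns 25 - past the
 * 20 second soft threshold - and the caller reports that duration.
 */
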
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

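/*
 * is_hardlockup() is called from NMI context by the perf-based hardlockup
 * detector: if hrtimer_interrupts has not advanced since the previous NMI
 * sample, hrtimer interrupts have stalled for a full sample window and
 * the CPU is considered hard locked up.
 */
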
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't, then
	 * this is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup. Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up. */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

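/*
 * The smpboot infrastructure runs one "watchdog/%u" thread per CPU:
 * .setup runs when a thread is created on a newly onlined CPU, .park and
 * .unpark run when a CPU goes down/up or when the threads are parked and
 * unparked explicitly (see watchdog_park_threads() below), and .cleanup
 * runs when a thread is torn down. Threads on CPUs outside
 * 'watchdog_cpumask' stay parked.
 */
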
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}
	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static inline void set_sample_period(void) { }
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	__lockup_detector_cleanup();

	return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

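/*
 * Example from userspace: writing 0 to /proc/sys/kernel/nmi_watchdog
 * reaches this function via proc_nmi_watchdog() below, clears
 * NMI_WATCHDOG_ENABLED in 'watchdog_enabled' and reconfigures the
 * detectors:
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog
 */
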
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

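/*
 * Example: raising the threshold stretches both detectors, e.g.
 *
 *	echo 20 > /proc/sys/kernel/watchdog_thresh
 *
 * moves the hard lockup threshold to 20 s, the soft lockup threshold to
 * 40 s and the sample period to 8 s.
 */
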
static int watchdog_update_cpus(void)
{
	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR)) {
		return smpboot_update_cpumask_percpu_thread(&watchdog_threads,
							    &watchdog_cpumask);
	}
	return 0;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
		__lockup_detector_cleanup();
	}

	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}

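/*
 * Example: restricting the watchdog to a subset of CPUs, using the
 * cpulist format accepted by proc_do_large_bitmap(), e.g.:
 *
 *	echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 */
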
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}