/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_proc_mutex);
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;
static u64 __read_mostly sample_period;
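/*
 * Per-CPU state shared by the soft and hard lockup detectors: the last
 * time (in seconds) the per-CPU watchdog task touched its timestamp, the
 * task and hrtimer themselves, and counters that let the hrtimer callback,
 * the per-CPU thread and the NMI-based hard-lockup check tell whether the
 * other parties are still making progress.
 */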
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a fixed factor: the soft-lockup threshold is twice
 * the hard-lockup threshold (watchdog_thresh).
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
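        /*
         * For example, with the default watchdog_thresh of 10 seconds the
         * soft-lockup threshold is 20 seconds and sample_period works out
         * to 4 seconds, i.e. five timer expirations per soft-lockup window.
         */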
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}
void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done lockless. Do we care if a 0 races with a timestamp?
         * All it means is that the softlockup check starts one cycle later.
         */
        for_each_watchdog_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
}
void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}
/* watchdog detector functions */
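/*
 * is_hardlockup() is typically called from the hard-lockup detector's
 * NMI (perf) callback: if the per-CPU hrtimer_interrupts counter has not
 * advanced since the previous check, the hrtimer interrupt is no longer
 * firing on this CPU and a hard lockup is assumed.
 */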
bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
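/*
 * The soft-lockup check below compares the current timestamp against the
 * last value the per-CPU watchdog task wrote to watchdog_touch_ts. If more
 * than get_softlockup_thresh() seconds have elapsed, the task has not run
 * for that long and the elapsed time (in seconds) is returned as the lockup
 * duration; otherwise 0 is returned.
 */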
static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}
/*
 * These two functions are mostly architecture specific, so we define
 * them as weak here.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
        return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}
static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
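/*
 * watchdog_timer_fn() runs once every sample_period on each CPU: it
 * increments hrtimer_interrupts (consumed by the hard-lockup check), wakes
 * the per-CPU watchdog task so the timestamp gets re-touched, and reports
 * a soft lockup if the timestamp has not been touched within the
 * soft-lockup threshold.
 */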
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        if (atomic_read(&watchdog_park_in_progress) != 0)
                return HRTIMER_NORESTART;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }
        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled. The task touches the watchdog to
         * indicate it is getting cpu time. If it hasn't then
         * this is a good indication some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }
                if (softlockup_all_cpu_backtrace) {
                        /* Prevent multiple soft-lockup reports if one cpu is already
                         * engaged in dumping cpu back traces
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /* Avoid generating two back traces for current
                         * given that one is already made above
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}
static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();

        /*
         * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
         * failure path. Check for failures that can occur asynchronously -
         * for example, when CPUs are on-lined - and shut down the hardware
         * perf event on each CPU accordingly.
         *
         * The only non-obvious place this bit can be cleared is through
         * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
         * pr_info here would be too noisy as it would result in a message
         * every few seconds if the hardlockup was disabled but the softlockup
         * enabled.
         */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                watchdog_nmi_disable(cpu);
}
static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
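/*
 * smpboot_register_percpu_thread_cpumask() uses this descriptor to create
 * one "watchdog/%u" task per CPU in the watchdog cpumask. The setup/unpark
 * and park/cleanup callbacks start and stop the per-CPU hrtimer and perf
 * event as CPUs come online, go offline, or are parked for updates.
 */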
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
        int cpu, ret = 0;

        atomic_set(&watchdog_park_in_progress, 1);

        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }

        atomic_set(&watchdog_park_in_progress, 0);

        return ret;
}
/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
        int cpu;

        for_each_watchdog_cpu(cpu)
                kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
        int ret = 0;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
        /*
         * Multiple suspend requests can be active in parallel (counted by
         * the 'watchdog_suspended' variable). If the watchdog threads are
         * running, the first caller takes care that they will be parked.
         * The state of 'watchdog_running' cannot change while a suspend
         * request is active (see related code in 'proc' handlers).
         */
        if (watchdog_running && !watchdog_suspended)
                ret = watchdog_park_threads();

        if (ret == 0)
                watchdog_suspended++;
        else {
                watchdog_disable_all_cpus();
                pr_err("Failed to suspend lockup detectors, disabled\n");
                watchdog_enabled = 0;
        }

        mutex_unlock(&watchdog_proc_mutex);

        return ret;
}
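/*
 * Illustrative usage: code that legitimately stalls CPUs for a long time
 * can bracket that region with lockup_detector_suspend() and
 * lockup_detector_resume() to avoid false positives. Suspend requests are
 * counted, so nested calls from independent callers are allowed.
 */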
/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
        mutex_lock(&watchdog_proc_mutex);

        watchdog_suspended--;
        /*
         * The watchdog threads are unparked if they were previously running
         * and if there is no more active suspend request.
         */
        if (watchdog_running && !watchdog_suspended)
                watchdog_unpark_threads();

        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
}
static int update_watchdog_all_cpus(void)
{
        int ret;

        ret = watchdog_park_threads();
        if (ret)
                return ret;

        watchdog_unpark_threads();

        return 0;
}
static int watchdog_enable_all_cpus(void)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                             &watchdog_cpumask);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else {
                /*
                 * Enable/disable the lockup detectors or
                 * change the sample period 'on the fly'.
                 */
                err = update_watchdog_all_cpus();

                if (err) {
                        watchdog_disable_all_cpus();
                        pr_err("Failed to update lockup detectors, disabled\n");
                }
        }

        if (err)
                watchdog_enabled = 0;

        return err;
}
static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}
#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
        int err = 0;

        /*
         * Watchdog threads won't be started if they are already active.
         * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
         * care of this. If those threads are already active, the sample
         * period will be updated and the lockup detectors will be enabled
         * or disabled 'on the fly'.
         */
        if (watchdog_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return err;
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;
        int *watchdog_param = (int *)table->data;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        /*
         * If the parameter is being read return the state of the corresponding
         * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
         * run state of the lockup detectors.
         */
        if (!write) {
                *watchdog_param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (err)
                        goto out;

                /*
                 * There is a race window between fetching the current value
                 * from 'watchdog_enabled' and storing the new value. During
                 * this race window, watchdog_nmi_enable() can sneak in and
                 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
                 * The 'cmpxchg' detects this race and the loop retries.
                 */
                do {
                        old = watchdog_enabled;
                        /*
                         * If the parameter value is not zero set the
                         * corresponding bit(s), else clear it(them).
                         */
                        if (*watchdog_param)
                                new = old | which;
                        else
                                new = old & ~which;
                } while (cmpxchg(&watchdog_enabled, old, new) != old);

                /*
                 * Update the run state of the lockup detectors. There is _no_
                 * need to check the value returned by proc_watchdog_update()
                 * and to restore the previous value of 'watchdog_enabled' as
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
                if (old == new)
                        goto out;

                err = proc_watchdog_update();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        old = ACCESS_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (err || !write)
                goto out;

        /*
         * Update the sample period. Restore on failure.
         */
        new = ACCESS_ONCE(watchdog_thresh);
        if (old == new)
                goto out;

        set_sample_period();
        err = proc_watchdog_update();
        if (err) {
                watchdog_thresh = old;
                set_sample_period();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
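/*
 * Illustrative usage (the bitmap is written in the usual cpulist sysctl
 * format, e.g. "0-3,8"):
 *   echo 0-3 > /proc/sys/kernel/watchdog_cpumask
 * restricts the watchdog threads to CPUs 0-3.
 */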
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write) {
                /* Remove impossible cpus to keep sysctl output cleaner. */
                cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
                            cpu_possible_mask);

                if (watchdog_running) {
                        /*
                         * Failure would be due to being unable to allocate
                         * a temporary cpumask, so we are likely not in a
                         * position to do much else to make things better.
                         */
                        if (smpboot_update_cpumask_percpu_thread(
                                    &watchdog_threads, &watchdog_cpumask) != 0)
                                pr_err("cpumask update failed\n");
                }
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
        set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
}