
watchdog/sysctl: Clean up sysctl variable name space
[uclinux-h8/linux.git] / kernel / watchdog.c
1 /*
2  * Detect hard and soft lockups on a system
3  *
4  * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
5  *
6  * Note: Most of this code is borrowed heavily from the original softlockup
7  * detector, so thanks to Ingo for the initial implementation.
8  * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
9  * to those contributors as well.
10  */
11
12 #define pr_fmt(fmt) "watchdog: " fmt
13
14 #include <linux/mm.h>
15 #include <linux/cpu.h>
16 #include <linux/nmi.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/sysctl.h>
20 #include <linux/smpboot.h>
21 #include <linux/sched/rt.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/tick.h>
24 #include <linux/workqueue.h>
25 #include <linux/sched/clock.h>
26 #include <linux/sched/debug.h>
27
28 #include <asm/irq_regs.h>
29 #include <linux/kvm_para.h>
30 #include <linux/kthread.h>
31
32 static DEFINE_MUTEX(watchdog_mutex);
33
34 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
35 unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
36                                                 NMI_WATCHDOG_ENABLED;
37 #else
38 unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
39 #endif
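
/*
 * watchdog_enabled is a bit mask: SOFT_WATCHDOG_ENABLED controls the
 * softlockup detector and NMI_WATCHDOG_ENABLED the hardlockup detector.
 * Per the #if above, the NMI bit is only part of the default when a
 * hardlockup detector (perf based or an arch provided NMI watchdog) is
 * actually available.
 */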
40
41 int __read_mostly nmi_watchdog_user_enabled;
42 int __read_mostly soft_watchdog_user_enabled;
43 int __read_mostly watchdog_user_enabled;
44 int __read_mostly watchdog_thresh = 10;
45
46 struct cpumask watchdog_allowed_mask __read_mostly;
47 static bool softlockup_threads_initialized __read_mostly;
48
49 struct cpumask watchdog_cpumask __read_mostly;
50 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
51
52 #ifdef CONFIG_HARDLOCKUP_DETECTOR
53 /*
54  * Should we panic when a soft-lockup or hard-lockup occurs:
55  */
56 unsigned int __read_mostly hardlockup_panic =
57                         CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
58 /*
59  * We may not want to enable hard lockup detection by default in all cases,
60  * for example when running the kernel as a guest on a hypervisor. In these
61  * cases this function can be called to disable hard lockup detection. This
62  * function should only be executed once by the boot processor before the
63  * kernel command line parameters are parsed, because otherwise it is not
64  * possible to override this in hardlockup_panic_setup().
65  */
66 void __init hardlockup_detector_disable(void)
67 {
68         watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
69 }
70
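/*
 * Values accepted by the "nmi_watchdog=" boot parameter, as parsed by
 * hardlockup_panic_setup() below:
 *   nmi_watchdog=panic    - panic when a hard lockup is detected
 *   nmi_watchdog=nopanic  - warn only, do not panic
 *   nmi_watchdog=0        - disable the NMI watchdog
 *   nmi_watchdog=1        - enable the NMI watchdog
 */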
71 static int __init hardlockup_panic_setup(char *str)
72 {
73         if (!strncmp(str, "panic", 5))
74                 hardlockup_panic = 1;
75         else if (!strncmp(str, "nopanic", 7))
76                 hardlockup_panic = 0;
77         else if (!strncmp(str, "0", 1))
78                 watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
79         else if (!strncmp(str, "1", 1))
80                 watchdog_enabled |= NMI_WATCHDOG_ENABLED;
81         return 1;
82 }
83 __setup("nmi_watchdog=", hardlockup_panic_setup);
84
85 # ifdef CONFIG_SMP
86 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
87
88 static int __init hardlockup_all_cpu_backtrace_setup(char *str)
89 {
90         sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
91         return 1;
92 }
93 __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
94 # endif /* CONFIG_SMP */
95 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
96
97 /*
98  * These functions can be overridden if an architecture implements its
99  * own hardlockup detector.
100  *
101  * watchdog_nmi_enable/disable can be implemented to start and stop the
102  * hard lockup detector when the softlockup watchdog threads start and
103  * stop. The arch must select the SOFTLOCKUP_DETECTOR Kconfig.
104  */
105 int __weak watchdog_nmi_enable(unsigned int cpu)
106 {
107         return 0;
108 }
109
110 void __weak watchdog_nmi_disable(unsigned int cpu)
111 {
112         hardlockup_detector_perf_disable();
113 }
114
115 /*
116  * watchdog_nmi_reconfigure can be implemented to be notified after any
117  * watchdog configuration change. The arch hardlockup watchdog should
118  * respond to the following variables:
119  * - watchdog_enabled
120  * - watchdog_thresh
121  * - watchdog_cpumask
122  * - sysctl_hardlockup_all_cpu_backtrace
123  * - hardlockup_panic
124  */
125 void __weak watchdog_nmi_reconfigure(void) { }
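
/*
 * Illustrative sketch (not part of this file): an architecture with its
 * own hardlockup watchdog could override the weak hook above roughly
 * like this, re-reading the variables listed in the comment and
 * restarting its watchdog accordingly:
 *
 *	void watchdog_nmi_reconfigure(void)
 *	{
 *		arch_watchdog_stop();
 *		if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
 *			arch_watchdog_start(watchdog_thresh, &watchdog_cpumask);
 *	}
 *
 * arch_watchdog_stop()/arch_watchdog_start() are hypothetical helpers,
 * named here purely for illustration.
 */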
126
127 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
128
129 /* Global variables, exported for sysctl */
130 unsigned int __read_mostly softlockup_panic =
131                         CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
132
133 static u64 __read_mostly sample_period;
134
135 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
136 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
137 static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
138 static DEFINE_PER_CPU(bool, softlockup_touch_sync);
139 static DEFINE_PER_CPU(bool, soft_watchdog_warn);
140 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
141 static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
142 static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
143 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
144 static unsigned long soft_lockup_nmi_warn;
145
146 static int __init softlockup_panic_setup(char *str)
147 {
148         softlockup_panic = simple_strtoul(str, NULL, 0);
149         return 1;
150 }
151 __setup("softlockup_panic=", softlockup_panic_setup);
152
153 static int __init nowatchdog_setup(char *str)
154 {
155         watchdog_enabled = 0;
156         return 1;
157 }
158 __setup("nowatchdog", nowatchdog_setup);
159
160 static int __init nosoftlockup_setup(char *str)
161 {
162         watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
163         return 1;
164 }
165 __setup("nosoftlockup", nosoftlockup_setup);
166
167 #ifdef CONFIG_SMP
168 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
169
170 static int __init softlockup_all_cpu_backtrace_setup(char *str)
171 {
172         sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
173         return 1;
174 }
175 __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
176 #endif
177
178 static void __lockup_detector_cleanup(void);
179
180 /*
181  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
182  * lockups can have false positives under extreme conditions, so we generally
183  * want a higher threshold for soft lockups than for hard lockups. We couple
184  * the thresholds with a factor: the soft-lockup threshold is twice the
185  * hard-lockup threshold.
186  */
187 static int get_softlockup_thresh(void)
188 {
189         return watchdog_thresh * 2;
190 }
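
/*
 * Worked example: with the default watchdog_thresh of 10 seconds the
 * soft-lockup threshold is 20 seconds.
 */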
191
192 /*
193  * Returns seconds, approximately.  We don't need nanosecond
194  * resolution, and we don't need to waste time with a big divide when
195  * 2^30ns == 1.074s.
196  */
197 static unsigned long get_timestamp(void)
198 {
199         return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
200 }
201
202 static void set_sample_period(void)
203 {
204         /*
205          * Convert watchdog_thresh from seconds to ns.
206          * The divide by 5 gives the hrtimer several chances (two
207          * or three with the current relation between the soft
208          * and hard thresholds) to increment before the
209          * hardlockup detector generates a warning.
210          */
211         sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
212         watchdog_update_hrtimer_threshold(sample_period);
213 }
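
/*
 * Worked example: with the default watchdog_thresh of 10 seconds,
 * get_softlockup_thresh() returns 20, so sample_period becomes
 * 20 * NSEC_PER_SEC / 5 = 4 seconds. The hrtimer therefore fires
 * roughly five times within one soft-lockup window.
 */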
214
215 /* Commands for resetting the watchdog */
216 static void __touch_watchdog(void)
217 {
218         __this_cpu_write(watchdog_touch_ts, get_timestamp());
219 }
220
221 /**
222  * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
223  *
224  * Call when the scheduler may have stalled for legitimate reasons
225  * preventing the watchdog task from executing - e.g. the scheduler
226  * entering idle state.  This should only be used for scheduler events.
227  * Use touch_softlockup_watchdog() for everything else.
228  */
229 void touch_softlockup_watchdog_sched(void)
230 {
231         /*
232          * Preemption can be enabled.  It doesn't matter which CPU's timestamp
233          * gets zeroed here, so use the raw_ operation.
234          */
235         raw_cpu_write(watchdog_touch_ts, 0);
236 }
237
238 void touch_softlockup_watchdog(void)
239 {
240         touch_softlockup_watchdog_sched();
241         wq_watchdog_touch(raw_smp_processor_id());
242 }
243 EXPORT_SYMBOL(touch_softlockup_watchdog);
244
245 void touch_all_softlockup_watchdogs(void)
246 {
247         int cpu;
248
249         /*
250          * watchdog_mutex cannot be taken here, as this might be called
251          * from (soft)interrupt context, so the access to
252          * watchdog_allowed_mask might race with a concurrent update.
253          *
254          * The watchdog time stamp can race against a concurrent real
255          * update as well, the only side effect might be a cycle delay for
256          * the softlockup check.
257          */
258         for_each_cpu(cpu, &watchdog_allowed_mask)
259                 per_cpu(watchdog_touch_ts, cpu) = 0;
260         wq_watchdog_touch(-1);
261 }
262
263 void touch_softlockup_watchdog_sync(void)
264 {
265         __this_cpu_write(softlockup_touch_sync, true);
266         __this_cpu_write(watchdog_touch_ts, 0);
267 }
268
269 static int is_softlockup(unsigned long touch_ts)
270 {
271         unsigned long now = get_timestamp();
272
273         if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
274                 /* Warn about unreasonable delays. */
275                 if (time_after(now, touch_ts + get_softlockup_thresh()))
276                         return now - touch_ts;
277         }
278         return 0;
279 }
280
281 /* watchdog detector functions */
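/*
 * is_hardlockup() is used by the perf/NMI based hardlockup detector,
 * which lives outside this file. hrtimer_interrupts is incremented by
 * the watchdog hrtimer below; if it has not advanced since the previous
 * NMI sample, the timer interrupt has been blocked for a whole period
 * and the CPU is considered hard locked up.
 */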
282 bool is_hardlockup(void)
283 {
284         unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
285
286         if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
287                 return true;
288
289         __this_cpu_write(hrtimer_interrupts_saved, hrint);
290         return false;
291 }
292
293 static void watchdog_interrupt_count(void)
294 {
295         __this_cpu_inc(hrtimer_interrupts);
296 }
297
298 /* watchdog kicker functions */
299 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
300 {
301         unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
302         struct pt_regs *regs = get_irq_regs();
303         int duration;
304         int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
305
306         if (!watchdog_enabled)
307                 return HRTIMER_NORESTART;
308
309         /* kick the hardlockup detector */
310         watchdog_interrupt_count();
311
312         /* kick the softlockup detector */
313         wake_up_process(__this_cpu_read(softlockup_watchdog));
314
315         /* .. and repeat */
316         hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
317
318         if (touch_ts == 0) {
319                 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
320                         /*
321                          * If the time stamp was touched atomically
322                          * make sure the scheduler tick is up to date.
323                          */
324                         __this_cpu_write(softlockup_touch_sync, false);
325                         sched_clock_tick();
326                 }
327
328                 /* Clear the guest paused flag on watchdog reset */
329                 kvm_check_and_clear_guest_paused();
330                 __touch_watchdog();
331                 return HRTIMER_RESTART;
332         }
333
334         /*
335          * Check for a softlockup: this is done by making sure a high
336          * priority task is being scheduled. The task touches the watchdog
337          * to indicate it is getting cpu time. If it hasn't done so within
338          * the soft-lockup threshold, some other task is likely hogging the cpu.
339          */
340         duration = is_softlockup(touch_ts);
341         if (unlikely(duration)) {
342                 /*
343                  * If a virtual machine is stopped by the host it can look to
344                  * the watchdog like a soft lockup, check to see if the host
345                  * stopped the vm before we issue the warning
346                  */
347                 if (kvm_check_and_clear_guest_paused())
348                         return HRTIMER_RESTART;
349
350                 /* only warn once */
351                 if (__this_cpu_read(soft_watchdog_warn) == true) {
352                         /*
353                          * When multiple processes are causing softlockups the
354                          * softlockup detector only warns on the first one
355                          * because the code relies on a full quiet cycle to
356                          * re-arm.  The second process prevents the quiet cycle
357                          * and never gets reported.  Use task pointers to detect
358                          * this.
359                          */
360                         if (__this_cpu_read(softlockup_task_ptr_saved) !=
361                             current) {
362                                 __this_cpu_write(soft_watchdog_warn, false);
363                                 __touch_watchdog();
364                         }
365                         return HRTIMER_RESTART;
366                 }
367
368                 if (softlockup_all_cpu_backtrace) {
369                         /* Prevent multiple soft-lockup reports if one cpu is already
370                          * engaged in dumping cpu back traces
371                          */
372                         if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
373                                 /* Someone else will report us. Let's give up */
374                                 __this_cpu_write(soft_watchdog_warn, true);
375                                 return HRTIMER_RESTART;
376                         }
377                 }
378
379                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
380                         smp_processor_id(), duration,
381                         current->comm, task_pid_nr(current));
382                 __this_cpu_write(softlockup_task_ptr_saved, current);
383                 print_modules();
384                 print_irqtrace_events(current);
385                 if (regs)
386                         show_regs(regs);
387                 else
388                         dump_stack();
389
390                 if (softlockup_all_cpu_backtrace) {
391                         /* Avoid generating two back traces for current
392                          * given that one is already made above
393                          */
394                         trigger_allbutself_cpu_backtrace();
395
396                         clear_bit(0, &soft_lockup_nmi_warn);
397                         /* Barrier to sync with other cpus */
398                         smp_mb__after_atomic();
399                 }
400
401                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
402                 if (softlockup_panic)
403                         panic("softlockup: hung tasks");
404                 __this_cpu_write(soft_watchdog_warn, true);
405         } else
406                 __this_cpu_write(soft_watchdog_warn, false);
407
408         return HRTIMER_RESTART;
409 }
410
411 static void watchdog_set_prio(unsigned int policy, unsigned int prio)
412 {
413         struct sched_param param = { .sched_priority = prio };
414
415         sched_setscheduler(current, policy, &param);
416 }
417
418 static void watchdog_enable(unsigned int cpu)
419 {
420         struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
421
422         /*
423          * Start the timer first to prevent the NMI watchdog triggering
424          * before the timer has a chance to fire.
425          */
426         hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
427         hrtimer->function = watchdog_timer_fn;
428         hrtimer_start(hrtimer, ns_to_ktime(sample_period),
429                       HRTIMER_MODE_REL_PINNED);
430
431         /* Initialize timestamp */
432         __touch_watchdog();
433         /* Enable the perf event */
434         watchdog_nmi_enable(cpu);
435
436         watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
437 }
438
439 static void watchdog_disable(unsigned int cpu)
440 {
441         struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
442
443         watchdog_set_prio(SCHED_NORMAL, 0);
444         /*
445          * Disable the perf event first. That prevents the perf NMI from
446          * detecting a false positive if there is a large delay between
447          * disabling the timer and disabling the perf event.
448          */
449         watchdog_nmi_disable(cpu);
450         hrtimer_cancel(hrtimer);
451 }
452
453 static void watchdog_cleanup(unsigned int cpu, bool online)
454 {
455         watchdog_disable(cpu);
456 }
457
458 static int watchdog_should_run(unsigned int cpu)
459 {
460         return __this_cpu_read(hrtimer_interrupts) !=
461                 __this_cpu_read(soft_lockup_hrtimer_cnt);
462 }
463
464 /*
465  * The watchdog thread function - touches the timestamp.
466  *
467  * It only runs once every sample period (4 seconds by
468  * default) to reset the softlockup timestamp. If this gets delayed
469  * for more than 2*watchdog_thresh seconds then the debug-printout
470  * triggers in watchdog_timer_fn().
471  */
472 static void watchdog(unsigned int cpu)
473 {
474         __this_cpu_write(soft_lockup_hrtimer_cnt,
475                          __this_cpu_read(hrtimer_interrupts));
476         __touch_watchdog();
477 }
478
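/*
 * The per-CPU softlockup threads are driven by the smpboot
 * infrastructure: .setup/.unpark arm the hrtimer and the perf event and
 * raise the thread to SCHED_FIFO, while .park/.cleanup tear them down
 * again. The threads only run on CPUs in watchdog_allowed_mask.
 */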
479 static struct smp_hotplug_thread watchdog_threads = {
480         .store                  = &softlockup_watchdog,
481         .thread_should_run      = watchdog_should_run,
482         .thread_fn              = watchdog,
483         .thread_comm            = "watchdog/%u",
484         .setup                  = watchdog_enable,
485         .cleanup                = watchdog_cleanup,
486         .park                   = watchdog_disable,
487         .unpark                 = watchdog_enable,
488 };
489
490 static void softlockup_update_smpboot_threads(void)
491 {
492         lockdep_assert_held(&watchdog_mutex);
493
494         if (!softlockup_threads_initialized)
495                 return;
496
497         smpboot_update_cpumask_percpu_thread(&watchdog_threads,
498                                              &watchdog_allowed_mask);
499         __lockup_detector_cleanup();
500 }
501
502 /* Temporarily park all watchdog threads */
503 static void softlockup_park_all_threads(void)
504 {
505         cpumask_clear(&watchdog_allowed_mask);
506         softlockup_update_smpboot_threads();
507 }
508
509 /* Unpark enabled threads */
510 static void softlockup_unpark_threads(void)
511 {
512         cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
513         softlockup_update_smpboot_threads();
514 }
515
516 static void softlockup_reconfigure_threads(bool enabled)
517 {
518         softlockup_park_all_threads();
519         set_sample_period();
520         if (enabled)
521                 softlockup_unpark_threads();
522 }
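
/*
 * Reconfiguration always parks every thread first, recomputes the
 * sample period, and only unparks the threads in watchdog_cpumask when
 * the detector is enabled, so a disabled watchdog ends up with all
 * threads parked and no hrtimers armed.
 */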
523
524 /*
525  * Create the watchdog thread infrastructure.
526  *
527  * The threads are not unparked as watchdog_allowed_mask is empty.  When
528  * the threads are successfully initialized, take the proper locks and
529  * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
530  */
531 static __init void softlockup_init_threads(void)
532 {
533         int ret;
534
535         /*
536          * If sysctl is off and watchdog got disabled on the command line,
537          * nothing to do here.
538          */
539         if (!IS_ENABLED(CONFIG_SYSCTL) &&
540             !(watchdog_enabled && watchdog_thresh))
541                 return;
542
543         ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
544                                                      &watchdog_allowed_mask);
545         if (ret) {
546                 pr_err("Failed to initialize soft lockup detector threads\n");
547                 return;
548         }
549
550         mutex_lock(&watchdog_mutex);
551         softlockup_threads_initialized = true;
552         softlockup_reconfigure_threads(watchdog_enabled && watchdog_thresh);
553         mutex_unlock(&watchdog_mutex);
554 }
555
556 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
557 static inline int watchdog_park_threads(void) { return 0; }
558 static inline void watchdog_unpark_threads(void) { }
559 static inline int watchdog_enable_all_cpus(void) { return 0; }
560 static inline void watchdog_disable_all_cpus(void) { }
561 static inline void softlockup_init_threads(void) { }
562 static inline void softlockup_reconfigure_threads(bool enabled) { }
563 #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
564
565 static void __lockup_detector_cleanup(void)
566 {
567         lockdep_assert_held(&watchdog_mutex);
568         hardlockup_detector_perf_cleanup();
569 }
570
571 /**
572  * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
573  *
574  * Caller must not hold the cpu hotplug rwsem.
575  */
576 void lockup_detector_cleanup(void)
577 {
578         mutex_lock(&watchdog_mutex);
579         __lockup_detector_cleanup();
580         mutex_unlock(&watchdog_mutex);
581 }
582
583 /**
584  * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
585  *
586  * Special interface for parisc. It prevents lockup detector warnings from
587  * the default pm_poweroff() function which busy loops forever.
588  */
589 void lockup_detector_soft_poweroff(void)
590 {
591         watchdog_enabled = 0;
592 }
593
594 #ifdef CONFIG_SYSCTL
595
596 /* Propagate any changes to the watchdog threads */
597 static void proc_watchdog_update(void)
598 {
599         /* Remove impossible cpus to keep sysctl output clean. */
600         cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
601         softlockup_reconfigure_threads(watchdog_enabled && watchdog_thresh);
602         watchdog_nmi_reconfigure();
603 }
604
605 /*
606  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
607  *
608  * caller             | table->data points to      | 'which'
609  * -------------------|----------------------------|--------------------------
610  * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
611  *                    |                            | SOFT_WATCHDOG_ENABLED
612  * -------------------|----------------------------|--------------------------
613  * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
614  * -------------------|----------------------------|--------------------------
615  * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
616  */
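/*
 * For example, writing 0 to /proc/sys/kernel/nmi_watchdog clears
 * NMI_WATCHDOG_ENABLED while leaving SOFT_WATCHDOG_ENABLED untouched,
 * whereas /proc/sys/kernel/watchdog sets or clears both bits at once.
 */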
617 static int proc_watchdog_common(int which, struct ctl_table *table, int write,
618                                 void __user *buffer, size_t *lenp, loff_t *ppos)
619 {
620         int err, old, new;
621         int *watchdog_param = (int *)table->data;
622
623         cpu_hotplug_disable();
624         mutex_lock(&watchdog_mutex);
625
626         /*
627          * If the parameter is being read return the state of the corresponding
628          * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
629          * run state of the lockup detectors.
630          */
631         if (!write) {
632                 *watchdog_param = (watchdog_enabled & which) != 0;
633                 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
634         } else {
635                 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
636                 if (err)
637                         goto out;
638
639                 /*
640                  * There is a race window between fetching the current value
641                  * from 'watchdog_enabled' and storing the new value. During
642                  * this race window, watchdog_nmi_enable() can sneak in and
643                  * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
644                  * The 'cmpxchg' detects this race and the loop retries.
645                  */
646                 do {
647                         old = watchdog_enabled;
648                         /*
649                          * If the parameter value is not zero set the
650                          * corresponding bit(s), else clear it(them).
651                          */
652                         if (*watchdog_param)
653                                 new = old | which;
654                         else
655                                 new = old & ~which;
656                 } while (cmpxchg(&watchdog_enabled, old, new) != old);
657
658                 if (old != new)
659                         proc_watchdog_update();
660         }
661 out:
662         mutex_unlock(&watchdog_mutex);
663         cpu_hotplug_enable();
664         return err;
665 }
666
667 /*
668  * /proc/sys/kernel/watchdog
669  */
670 int proc_watchdog(struct ctl_table *table, int write,
671                   void __user *buffer, size_t *lenp, loff_t *ppos)
672 {
673         return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
674                                     table, write, buffer, lenp, ppos);
675 }
676
677 /*
678  * /proc/sys/kernel/nmi_watchdog
679  */
680 int proc_nmi_watchdog(struct ctl_table *table, int write,
681                       void __user *buffer, size_t *lenp, loff_t *ppos)
682 {
683         return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
684                                     table, write, buffer, lenp, ppos);
685 }
686
687 /*
688  * /proc/sys/kernel/soft_watchdog
689  */
690 int proc_soft_watchdog(struct ctl_table *table, int write,
691                         void __user *buffer, size_t *lenp, loff_t *ppos)
692 {
693         return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
694                                     table, write, buffer, lenp, ppos);
695 }
696
697 /*
698  * /proc/sys/kernel/watchdog_thresh
699  */
700 int proc_watchdog_thresh(struct ctl_table *table, int write,
701                          void __user *buffer, size_t *lenp, loff_t *ppos)
702 {
703         int err, old;
704
705         cpu_hotplug_disable();
706         mutex_lock(&watchdog_mutex);
707
708         old = READ_ONCE(watchdog_thresh);
709         err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
710
711         if (!err && write && old != READ_ONCE(watchdog_thresh))
712                 proc_watchdog_update();
713
714         mutex_unlock(&watchdog_mutex);
715         cpu_hotplug_enable();
716         return err;
717 }
718
719 /*
720  * The cpumask is the mask of possible cpus that the watchdog can run
721  * on, not the mask of cpus it is actually running on.  This allows the
722  * user to specify a mask that will include cpus that have not yet
723  * been brought online, if desired.
724  */
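/*
 * The bitmap is handled by proc_do_large_bitmap(), so writes take a cpu
 * list such as "0-3,8". The sysctl file itself is registered elsewhere
 * (typically as /proc/sys/kernel/watchdog_cpumask).
 */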
725 int proc_watchdog_cpumask(struct ctl_table *table, int write,
726                           void __user *buffer, size_t *lenp, loff_t *ppos)
727 {
728         int err;
729
730         cpu_hotplug_disable();
731         mutex_lock(&watchdog_mutex);
732
733         err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
734         if (!err && write)
735                 proc_watchdog_update();
736
737         mutex_unlock(&watchdog_mutex);
738         cpu_hotplug_enable();
739         return err;
740 }
741 #endif /* CONFIG_SYSCTL */
742
743 void __init lockup_detector_init(void)
744 {
745 #ifdef CONFIG_NO_HZ_FULL
746         if (tick_nohz_full_enabled()) {
747                 pr_info("Disabling watchdog on nohz_full cores by default\n");
748                 cpumask_copy(&watchdog_cpumask, housekeeping_mask);
749         } else
750                 cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
751 #else
752         cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
753 #endif
754
755         softlockup_init_threads();
756 }