// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

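/* Parse the nmi_watchdog=[panic|nopanic|0|1] boot parameter */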
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we
 * couple the thresholds with a factor: the soft threshold is twice the
 * hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 *
	 * The divide by 5 gives the hrtimer several chances (two or
	 * three with the current relation between the soft and hard
	 * thresholds) to increment before the hardlockup detector
	 * generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
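	/* e.g. watchdog_thresh = 10s -> soft threshold 20s -> sample_period 4s */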
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

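/*
 * Touch the watchdog from a context where the scheduler tick may have
 * been stopped for a while (e.g. kgdb): also request a sched_clock
 * resync on the next watchdog tick before the timestamp is trusted.
 */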
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

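/*
 * Returns the stall duration in seconds if the soft watchdog is enabled
 * and the touch timestamp is older than the threshold, 0 otherwise.
 */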
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* Hard-lockup detector: has this CPU's hrtimer interrupt count stalled? */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
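	/*
	 * New stop work is only queued once the previous softlockup_fn
	 * run has completed, so at most one stop_work per CPU is in
	 * flight at any time.
	 */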
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: make sure that the high priority stop
	 * task was able to run and touch the watchdog to show it is
	 * getting CPU time. If it hasn't, some task is hogging the CPU.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

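/* Set up and start this CPU's watchdog hrtimer and perf event */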
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

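/* Stop this CPU's perf event and watchdog hrtimer, in that order */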
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first, so that a large delay between
	 * disabling the timer and disabling the perf event cannot cause
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

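/* Stop the watchdog on all CPUs in watchdog_allowed_mask and clear the mask */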
static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

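/* Start the watchdog on all CPUs in watchdog_cpumask */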
static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

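/* CPU hotplug callback: enable the watchdog on the CPU coming online */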
int lockup_detector_online_cpu(unsigned int cpu)
{
	watchdog_enable(cpu);
	return 0;
}

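/* CPU hotplug callback: disable the watchdog on the CPU going offline */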
int lockup_detector_offline_cpu(unsigned int cpu)
{
	watchdog_disable(cpu);
	return 0;
}

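/*
 * Stop all watchdogs, pick up the updated user configuration and start
 * them again. Callers serialize via watchdog_mutex.
 */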
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty.  When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function, which busy-loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
670 671 672 673
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

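/*
 * Boot-time initialization: restrict the watchdog to housekeeping CPUs,
 * probe for a working NMI watchdog and set up the softlockup detector.
 */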
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}