/*
 *	watchdog_dev.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	This part of the generic code takes care of the following
 *	misc device: /dev/watchdog.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/hrtimer.h>	/* For hrtimers */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kref.h>		/* For data references */
#include <linux/kthread.h>	/* For kthread_work */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/reboot.h>	/* For reboot notifier */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */

#include <uapi/linux/sched/types.h>	/* For struct sched_param */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"

/*
 * struct watchdog_core_data - watchdog core internal data
 * @kref:	Reference count.
 * @cdev:	The watchdog's Character device.
 * @wdd:	Pointer to watchdog device.
 * @lock:	Lock for watchdog core.
 * @last_keepalive:	Time of the most recent keepalive from userspace.
 * @last_hw_keepalive:	Time of the most recent keepalive sent to the hardware.
 * @timer:	hrtimer used to schedule keepalives generated by the core.
 * @work:	kthread work item that sends those keepalives.
 * @status:	Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct kref kref;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	ktime_t last_keepalive;
	ktime_t last_hw_keepalive;
	struct hrtimer timer;
	struct kthread_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};

/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct kthread_worker *watchdog_kworker;

static bool handle_boot_enabled =
	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}

static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	ktime_t keepalive_interval;
	ktime_t last_heartbeat, latest_heartbeat;
	ktime_t virt_timeout;
	unsigned int hw_heartbeat_ms;

	virt_timeout = ktime_add(wd_data->last_keepalive,
				 ms_to_ktime(timeout_ms));
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);

	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
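	/*
	 * Illustrative example (numbers are an assumption, not taken from
	 * the original source): with wdd->timeout = 60 and
	 * wdd->max_hw_heartbeat_ms = 10000, the worker pings every
	 * 5 seconds (hw_heartbeat_ms / 2) until fewer than 5 seconds
	 * remain before the "last heartbeat" point 10 seconds ahead of
	 * the virtual timeout; the final worker ping is then scheduled at
	 * exactly that point, so the hardware still expires 60 seconds
	 * after the most recent userspace keepalive.
	 */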
	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
	if (ktime_before(latest_heartbeat, keepalive_interval))
		return latest_heartbeat;
	return keepalive_interval;
}

static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		ktime_t t = watchdog_next_keepalive(wdd);

		if (t > 0)
			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
	} else {
		hrtimer_cancel(&wd_data->timer);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);  /* ping the watchdog */
	else
		err = wdd->ops->start(wdd); /* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_ping: ping the watchdog.
 *	@wdd: the watchdog device to ping
 *
 *	The caller must hold wd_data->lock.
 *
 *	If the watchdog has no ping operation of its own then it needs to
 *	be restarted via the start operation. This wrapper function does
 *	exactly that.
 *	We only ping when the watchdog device is running.
 */

static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = ktime_get();
	return __watchdog_ping(wdd);
}

static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
{
	struct watchdog_device *wdd = wd_data->wdd;

	return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
}

static void watchdog_ping_work(struct kthread_work *work)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(work, struct watchdog_core_data, work);

	mutex_lock(&wd_data->lock);
	if (watchdog_worker_should_ping(wd_data))
		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
}

static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(timer, struct watchdog_core_data, timer);

	kthread_queue_work(watchdog_kworker, &wd_data->work);
	return HRTIMER_NORESTART;
}

/*
 *	watchdog_start: wrapper to start the watchdog.
 *	@wdd: the watchdog device to start
 *
 *	The caller must hold wd_data->lock.
 *
 *	Start the watchdog if it is not active and mark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 */

static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = ktime_get();
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_stop: wrapper to stop the watchdog.
 *	@wdd: the watchdog device to stop
 *
 *	The caller must hold wd_data->lock.
 *
 *	Stop the watchdog if it is still active and unmark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 *	If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */

static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_get_status: wrapper to get the watchdog status
 *	@wdd: the watchdog device to get the status from
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the watchdog's status flags.
 */

static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 *	watchdog_set_timeout: set the watchdog timer timeout
 *	@wdd: the watchdog device to set the timeout for
 *	@timeout: timeout to set in seconds
 *
 *	The caller must hold wd_data->lock.
 */

static int watchdog_set_timeout(struct watchdog_device *wdd,
							unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_set_pretimeout: set the watchdog timer pretimeout
 *	@wdd: the watchdog device to set the timeout for
 *	@timeout: pretimeout to set in seconds
 */

static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}

/*
 *	watchdog_get_timeleft: wrapper to get the time left before a reboot
 *	@wdd: the watchdog device to get the remaining time from
 *	@timeleft: the time that's left
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the time before a watchdog will reboot (if not pinged).
 */

static int watchdog_get_timeleft(struct watchdog_device *wdd,
							unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}

#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif
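
/*
 * Illustrative sysfs usage (an assumption, not part of the original file):
 * with CONFIG_WATCHDOG_SYSFS enabled, the attributes above are exposed
 * under /sys/class/watchdog/watchdog<N>/, e.g.:
 *
 *	cat /sys/class/watchdog/watchdog0/timeout
 *	cat /sys/class/watchdog/watchdog0/state
 */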

/*
 *	watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
 *	@wdd: the watchdog device to do the ioctl on
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The caller must hold wd_data->lock.
 */

static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
							unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 *	watchdog_write: writes to the watchdog.
 *	@file: file from VFS
 *	@data: user address of data
 *	@len: length of data
 *	@ppos: pointer to the file offset
 *
 *	A write to a watchdog device is defined as a keepalive ping.
 *	Writing the magic 'V' sequence allows the next close to turn
 *	off the watchdog (if 'nowayout' is not set).
 */

static ssize_t watchdog_write(struct file *file, const char __user *data,
						size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}
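
/*
 * Illustrative userspace sketch (an assumption, not part of this file): a
 * daemon keeps the watchdog alive by writing any byte, and writes 'V' just
 * before close() so the release path may stop the hardware (the "magic
 * close" described above):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	write(fd, "\0", 1);	// keepalive ping
 *	...
 *	write(fd, "V", 1);	// allow the next close to stop the watchdog
 *	close(fd);
 */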

/*
 *	watchdog_ioctl: handle the different ioctl's for the watchdog device.
 *	@file: file handle to the device
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The watchdog API defines a common set of functions for all watchdogs
 *	according to their available features.
 */

static long watchdog_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}
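
/*
 * Illustrative ioctl usage (an assumption, not part of this file):
 *
 *	int timeout = 30;
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// on return, timeout holds
 *						// the value actually in
 *						// effect, thanks to the
 *						// fall-through into
 *						// WDIOC_GETTIMEOUT above
 *	ioctl(fd, WDIOC_KEEPALIVE, 0);		// explicit keepalive ping
 */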

/*
 *	watchdog_open: open the /dev/watchdog* devices.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	When the /dev/watchdog* device gets opened, we start the watchdog.
 *	Watch out: the /dev/watchdog device is single open, so we make sure
 *	it can only be opened once.
 */

static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	bool hw_running;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	hw_running = watchdog_hw_running(wdd);
	if (!hw_running && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!hw_running)
		kref_get(&wd_data->kref);

	/* /dev/watchdog is a virtual (and thus non-seekable) device */
	return nonseekable_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct kref *kref)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(kref, struct watchdog_core_data, kref);

	kfree(wd_data);
}

/*
 *	watchdog_release: release the watchdog device.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	This is the code for when /dev/watchdog gets closed. We will only
 *	stop the watchdog when we have received the magic char (and nowayout
 *	was not set), else the watchdog will keep running.
 */

static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		kref_put(&wd_data->kref, watchdog_core_data_release);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

/*
 *	watchdog_cdev_register: register watchdog character device
 *	@wdd: watchdog device
 *	@devno: character device number
 *
 *	Register a watchdog character device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */

static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	kref_init(&wd_data->kref);
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	if (IS_ERR_OR_NULL(watchdog_kworker)) {
		/* don't leak the wd_data allocated above */
		wdd->wd_data = NULL;
		kfree(wd_data);
		return -ENODEV;
	}

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wd_data->timer.function = watchdog_timer_expired;

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);
	wd_data->cdev.owner = wdd->ops->owner;

	/* Add the device */
	err = cdev_add(&wd_data->cdev, devno, 1);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id,  MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			kref_put(&wd_data->kref, watchdog_core_data_release);
		}
		return err;
	}

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		kref_get(&wd_data->kref);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}

/*
 *	watchdog_cdev_unregister: unregister watchdog character device
 *	@watchdog: watchdog device
 *
 *	Unregister watchdog character device and if needed the legacy
 *	/dev/watchdog device.
 */

static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_del(&wd_data->cdev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	kref_put(&wd_data->kref, watchdog_core_data_release);
}

static struct class watchdog_class = {
	.name =		"watchdog",
	.owner =	THIS_MODULE,
	.dev_groups =	wdt_groups,
};

static int watchdog_reboot_notifier(struct notifier_block *nb,
				    unsigned long code, void *data)
{
	struct watchdog_device *wdd;

	wdd = container_of(nb, struct watchdog_device, reboot_nb);
	if (code == SYS_DOWN || code == SYS_HALT) {
		if (watchdog_active(wdd)) {
			int ret;

			ret = wdd->ops->stop(wdd);
			if (ret)
				return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}

/*
 *	watchdog_dev_register: register a watchdog device
 *	@wdd: watchdog device
 *
 *	Register a watchdog device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */

int watchdog_dev_register(struct watchdog_device *wdd)
{
	struct device *dev;
	dev_t devno;
	int ret;

	devno = MKDEV(MAJOR(watchdog_devt), wdd->id);

	ret = watchdog_cdev_register(wdd, devno);
	if (ret)
		return ret;

	dev = device_create_with_groups(&watchdog_class, wdd->parent,
					devno, wdd, wdd->groups,
					"watchdog%d", wdd->id);
	if (IS_ERR(dev)) {
		watchdog_cdev_unregister(wdd);
		return PTR_ERR(dev);
	}

	ret = watchdog_register_pretimeout(wdd);
	if (ret) {
		device_destroy(&watchdog_class, devno);
		watchdog_cdev_unregister(wdd);
		return ret;
	}

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

		ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
		if (ret) {
			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
			       wdd->id, ret);
			watchdog_dev_unregister(wdd);
		}
	}

	return ret;
}

/*
 *	watchdog_dev_unregister: unregister a watchdog device
 *	@watchdog: watchdog device
 *
 *	Unregister watchdog device and if needed the legacy
 *	/dev/watchdog device.
 */

void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
	watchdog_cdev_unregister(wdd);
}

/*
 *	watchdog_dev_init: init dev part of watchdog core
 *
 *	Allocate a range of chardev nodes to use for watchdog devices
 */

int __init watchdog_dev_init(void)
{
	int err;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};

	watchdog_kworker = kthread_create_worker(0, "watchdogd");
	if (IS_ERR(watchdog_kworker)) {
		pr_err("Failed to create watchdog kworker\n");
		return PTR_ERR(watchdog_kworker);
	}
	sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	kthread_destroy_worker(watchdog_kworker);
	return err;
}

/*
 *	watchdog_dev_exit: exit dev part of watchdog core
 *
 *	Release the range of chardev nodes used for watchdog devices
 */

void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	kthread_destroy_worker(watchdog_kworker);
}

module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
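
/*
 * Illustrative usage (an assumption; the watchdog core is normally built
 * as the "watchdog" module): booting with watchdog.handle_boot_enabled=0
 * keeps the kernel from feeding an already-running hardware watchdog, so
 * the system resets if userspace never opens /dev/watchdog in time.
 */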