clocksource.c 18.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);	/* baseline for future cycle deltas */
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t now, delta;
	u64 nsec;

	/* sample the underlying cycle counter */
	now = tc->cc->read(tc->cc);

	/* cycles elapsed since the previous sample, masked to counter width */
	delta = (now - tc->cycle_last) & tc->cc->mask;

	/* scale the elapsed cycles to nanoseconds */
	nsec = cyclecounter_cyc2ns(tc->cc, delta);

	/* remember this sample as the baseline for the next call */
	tc->cycle_last = now;

	return nsec;
}

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:	Pointer to time counter.
 */
u64 timecounter_read(struct timecounter *tc)
{
	u64 now;

	/* fold the delta since the previous read into the running total */
	now = tc->nsec + timecounter_read_delta(tc);
	tc->nsec = now;

	return now;
}
EXPORT_SYMBOL(timecounter_read);

/**
 * timecounter_cyc2time - convert a cycle counter time stamp to nanoseconds
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Time stamps are converted correctly as long as they lie within
 * half a counter period of tc->cycle_last in either direction.
 */
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec = tc->nsec;

	/*
	 * A delta of more than half the counter range means the time
	 * stamp lies in the past relative to tc->cycle_last, not the
	 * future: walk backwards from the accumulated time instead.
	 */
	if (delta > tc->cc->mask / 2) {
		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec -= cyclecounter_cyc2ns(tc->cc, delta);
	} else {
		nsec += cyclecounter_cyc2ns(tc->cc, delta);
	}

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static struct work_struct watchdog_work;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

143 144 145 146 147 148 149 150 151
static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

152
static void clocksource_unstable(struct clocksource *cs, int64_t delta)
153 154 155 156
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
157 158
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	schedule_work(&watchdog_work);
159 160 161 162
}

static void clocksource_watchdog(unsigned long data)
{
163
	struct clocksource *cs;
164 165
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
166
	int next_cpu;
167 168

	spin_lock(&watchdog_lock);
169 170
	if (!watchdog_running)
		goto out;
171

172
	wdnow = watchdog->read(watchdog);
173 174
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
175 176
	watchdog_last = wdnow;

177 178 179
	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
180 181
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			schedule_work(&watchdog_work);
182
			continue;
183
		}
184

185
		csnow = cs->read(cs);
T
Thomas Gleixner 已提交
186

187 188 189
		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
T
Thomas Gleixner 已提交
190 191 192 193
			cs->wd_last = csnow;
			continue;
		}

194
		/* Check the deviation from the watchdog clocksource. */
195 196
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
213 214 215
		}
	}

216 217 218 219 220 221 222 223 224
	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
225
out:
226 227
	spin_unlock(&watchdog_lock);
}
228

229 230 231 232
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
233
	INIT_WORK(&watchdog_work, clocksource_watchdog_work);
234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

/* Stop the watchdog timer if it is running but no longer needed. */
static inline void clocksource_stop_watchdog(void)
{
	/* Keep running while both a watchdog and a watched list exist. */
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

250 251 252 253 254 255 256 257
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

T
Thomas Gleixner 已提交
258 259
static void clocksource_resume_watchdog(void)
{
260 261 262 263 264
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
T
Thomas Gleixner 已提交
265 266
}

267
static void clocksource_enqueue_watchdog(struct clocksource *cs)
268 269 270 271 272
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
273
		/* cs is a clocksource to be watched. */
274
		list_add(&cs->wd_list, &watchdog_list);
275
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
276
	} else {
277
		/* cs is a watchdog. */
278
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
279
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
280
		/* Pick the best watchdog. */
281 282 283
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
284
			clocksource_reset_watchdog();
285 286
		}
	}
287 288
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
289 290
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

319
static int clocksource_watchdog_kthread(void *data)
320 321 322
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
323
	LIST_HEAD(unstable);
324

325
	mutex_lock(&clocksource_mutex);
326 327 328 329
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
330
			list_add(&cs->wd_list, &unstable);
331 332 333
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
334 335 336 337 338
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
339
		__clocksource_change_rating(cs, 0);
340
	}
341
	mutex_unlock(&clocksource_mutex);
342
	return 0;
343 344
}

345 346 347
#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
348 349 350 351
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
T
Thomas Gleixner 已提交
352

353
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
T
Thomas Gleixner 已提交
354
static inline void clocksource_resume_watchdog(void) { }
355 356

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
357

T
Thomas Gleixner 已提交
358 359 360 361 362
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
363
	struct clocksource *cs;
T
Thomas Gleixner 已提交
364

365
	mutex_lock(&clocksource_mutex);
T
Thomas Gleixner 已提交
366

367
	list_for_each_entry(cs, &clocksource_list, list)
T
Thomas Gleixner 已提交
368 369 370 371 372
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();

373
	mutex_unlock(&clocksource_mutex);
T
Thomas Gleixner 已提交
374 375
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

static int finished_booting;

/**
393
 * clocksource_select - Select the best clocksource available
394
 *
395
 * Private function. Must hold clocksource_mutex when called.
396
 *
397 398
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
399
 */
400
static void clocksource_select(void)
401
{
402
	struct clocksource *best, *cs;
403

404
	if (!finished_booting || list_empty(&clocksource_list))
405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
429 430 431 432 433
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
434
}
435

436 437 438 439 440 441 442 443 444 445 446 447 448 449 450
/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	clocksource_select();
	return 0;
}
fs_initcall(clocksource_done_booting);

#else /* CONFIG_GENERIC_TIME */

/* Without generic timekeeping there is nothing to select. */
static inline void clocksource_select(void) { }

#endif

457 458
/*
 * Enqueue the clocksource sorted by rating
459
 */
460
static void clocksource_enqueue(struct clocksource *cs)
461
{
462 463
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;
464

465
	list_for_each_entry(tmp, &clocksource_list, list)
466
		/* Keep track of the place, where to insert */
467 468 469
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
470 471 472
}

/**
473
 * clocksource_register - Used to install new clocksources
474 475 476 477
 * @t:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
478
int clocksource_register(struct clocksource *cs)
479
{
480
	mutex_lock(&clocksource_mutex);
481 482
	clocksource_enqueue(cs);
	clocksource_select();
483
	clocksource_enqueue_watchdog(cs);
484
	mutex_unlock(&clocksource_mutex);
485
	return 0;
486
}
487
EXPORT_SYMBOL(clocksource_register);
488

489 490 491 492 493 494 495 496
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

497
/**
498
 * clocksource_change_rating - Change the rating of a registered clocksource
499
 */
500
void clocksource_change_rating(struct clocksource *cs, int rating)
501
{
502
	mutex_lock(&clocksource_mutex);
503
	__clocksource_change_rating(cs, rating);
504
	mutex_unlock(&clocksource_mutex);
505
}
506
EXPORT_SYMBOL(clocksource_change_rating);
507

508 509 510 511 512
/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
513
	mutex_lock(&clocksource_mutex);
514
	clocksource_dequeue_watchdog(cs);
515
	list_del(&cs->list);
516
	clocksource_select();
517
	mutex_unlock(&clocksource_mutex);
518
}
519
EXPORT_SYMBOL(clocksource_unregister);
520

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
530 531
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
532
{
533
	ssize_t count = 0;
534

535
	mutex_lock(&clocksource_mutex);
536
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
537
	mutex_unlock(&clocksource_mutex);
538

539
	return count;
540 541 542 543 544 545 546 547 548 549 550 551
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
585 586 587
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
588
{
589
	struct clocksource *src;
590
	ssize_t count = 0;
591

592
	mutex_lock(&clocksource_mutex);
593
	list_for_each_entry(src, &clocksource_list, list) {
594 595 596 597 598 599
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
600
			count += snprintf(buf + count,
601 602
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
603
	}
604
	mutex_unlock(&clocksource_mutex);
605

606 607
	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
608

609
	return count;
610 611 612 613 614
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

630
static int __init init_clocksource_sysfs(void)
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
648
#endif /* CONFIG_SYSFS */
649 650 651 652 653 654 655 656 657 658

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
659
	mutex_lock(&clocksource_mutex);
660 661
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
662
	mutex_unlock(&clocksource_mutex);
663 664 665 666 667 668 669 670 671 672 673 674 675 676
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);