/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

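/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized
 * @cc:			A cycle counter, ready to be used
 * @start_tstamp:	Arbitrary initial time stamp
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */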
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

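/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *			plus the initial time stamp
 * @tc:		Pointer to time counter
 *
 * In other words, this keeps track of time since the same epoch as
 * the initial time stamp passed to timecounter_init().
 */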
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_read);

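/**
 * timecounter_cyc2time - convert a cycle counter time stamp into the same
 *			time base as values returned by timecounter_read()
 * @tc:			Pointer to time counter
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Time stamps are converted correctly as long as they are no further
 * than half of the counter's wrap-around interval away from the time
 * of the last timecounter_read() call.
 */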
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as an old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);
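
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * owns a free-running hardware counter wraps it in a cyclecounter and
 * accumulates monotonic nanoseconds through a timecounter. All foo_*
 * names and the mult/shift values below are hypothetical.
 *
 *	static cycle_t foo_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)foo_hw_read_counter();
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.mult	= 8,	// e.g. a 125 MHz counter: 8 ns per cycle
 *		.shift	= 0,
 *	};
 *	static struct timecounter foo_tc;
 *
 *	timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
 *	...
 *	ns = timecounter_read(&foo_tc);	// call at least once per counter wrap
 */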

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

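/*
 * Timer callback, executed once per WATCHDOG_INTERVAL: measures the
 * nanoseconds elapsed on the watchdog and on every watched clocksource
 * and marks a clocksource unstable when the two disagree by more than
 * WATCHDOG_THRESHOLD. The timer is re-armed on the next online CPU so
 * that all CPUs get checked over time.
 */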
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	mutex_lock(&clocksource_mutex);

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();

	mutex_unlock(&clocksource_mutex);
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

static int finished_booting;

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the one
 * selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;
	clocksource_select();
	return 0;
}
fs_initcall(clocksource_done_booting);

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
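
/*
 * Example (illustrative sketch, not part of this file): registering a
 * made-up "foo" clocksource. All foo_* names are hypothetical; mult and
 * shift must satisfy ns = (cycles * mult) >> shift for the counter's
 * frequency, e.g. via clocksource_hz2mult().
 *
 *	static cycle_t foo_clocksource_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)foo_hw_read_counter();
 *	}
 *
 *	static struct clocksource foo_clocksource = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_clocksource_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	foo_clocksource.mult = clocksource_hz2mult(foo_rate_hz,
 *						   foo_clocksource.shift);
 *	clocksource_register(&foo_clocksource);
 */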

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
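
/*
 * Example: assuming the sysdev class below registers as "clocksource"
 * with device id 0, the attribute can be driven from userspace with:
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	tsc
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */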

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);