/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

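/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */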
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

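/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */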
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_read);

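/**
 * timecounter_cyc2time - convert a cycle counter reading to the time base
 *                        used by timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they fall into the
 * interval [-1/2 max cycle count, +1/2 max cycle count] relative to
 * tc->cycle_last, with "max cycle count" == cc->mask + 1. This allows
 * conversion of cycle counter values which were generated in the past.
 */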
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);
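
/*
 * Usage sketch (illustrative only; foo_read_hw(), FOO_MULT and FOO_SHIFT
 * are hypothetical): a driver with a free-running 32-bit hardware counter
 * layers a timecounter on top of a cyclecounter and converts raw hardware
 * time stamps to nanoseconds:
 *
 *	static cycle_t foo_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)foo_read_hw();
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.mult	= FOO_MULT,	(ns per cycle, scaled by 2^FOO_SHIFT)
 *		.shift	= FOO_SHIFT,
 *	};
 *	static struct timecounter foo_tc;
 *
 *	timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
 *	ns = timecounter_cyc2time(&foo_tc, hw_cycle_tstamp);
 */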

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

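/*
 * clocksource_watchdog - timer callback that cross-checks each watched
 * clocksource against the watchdog clocksource. Both are read, the
 * deltas since the previous run are converted to nanoseconds, and a
 * watched clocksource that deviates by more than WATCHDOG_THRESHOLD
 * is marked unstable. The timer then re-arms on the next online CPU,
 * so successive runs also cycle across all CPUs.
 */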
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}


static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by the userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns 0 on success.
 */
int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
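
/*
 * Registration sketch (illustrative only; foo_read_hw() and FOO_HZ are
 * hypothetical): a driver supplies a read callback plus name, rating,
 * mask, mult/shift and flags, then registers the clocksource:
 *
 *	static cycle_t foo_clocksource_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)foo_read_hw();
 *	}
 *
 *	static struct clocksource foo_clocksource = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_clocksource_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	foo_clocksource.mult = clocksource_hz2mult(FOO_HZ,
 *						   foo_clocksource.shift);
 *	clocksource_register(&foo_clocksource);
 */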

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
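
/*
 * The sysdev class and device above appear under
 * /sys/devices/system/clocksource/clocksource0/, e.g.:
 *
 *	cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */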
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
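
/*
 * Example: booting with "clocksource=acpi_pm" on the kernel command line
 * selects the acpi_pm clocksource as the override; it is then preferred
 * over higher-rated clocksources for as long as it remains usable.
 */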

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);