/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

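/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */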
void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

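/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */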
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL(timecounter_read);

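/**
 * timecounter_cyc2time - convert a cycle counter to the same time base
 *                        as values returned by timecounter_read()
 * @tc:		Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they fall into the
 * interval [-1/2 max cycle count, +1/2 max cycle count], with
 * "max cycle count" == cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */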
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);
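
/*
 * Illustrative sketch (not part of this file): a driver with a free
 * running 32 bit hardware counter would typically wire the API up as
 * below. All foo_* names, the register offset and the mult/shift
 * scaling are made up for the example.
 *
 *	static cycle_t foo_read(const struct cyclecounter *cc)
 *	{
 *		return readl(foo_base + FOO_COUNTER) & cc->mask;
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.mult	= 1 << 10,	// assumed scaling for cyclecounter_cyc2ns()
 *		.shift	= 10,
 *	};
 *
 *	timecounter_init(&foo_tc, &foo_cc, ktime_to_ns(ktime_get_real()));
 *	...
 *	ns = timecounter_read(&foo_tc);	// nanoseconds since the init epoch
 */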

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	wdnow = watchdog->read(watchdog);
	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
				     watchdog->mult, watchdog->shift);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		csnow = cs->read(cs);

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	mutex_lock(&clocksource_mutex);

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume();

	clocksource_resume_watchdog();

	mutex_unlock(&clocksource_mutex);
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 *
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which was selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_select();
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
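
/*
 * Illustrative sketch (not from this file): a minimal driver side
 * registration. All foo_* names, FOO_HZ and the shift value are
 * assumptions for the example.
 *
 *	static cycle_t foo_clocksource_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(foo_base + FOO_COUNTER);
 *	}
 *
 *	static struct clocksource foo_clocksource = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_clocksource_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	foo_clocksource.mult = clocksource_hz2mult(FOO_HZ,
 *						   foo_clocksource.shift);
 *	clocksource_register(&foo_clocksource);
 */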

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off \n: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
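
/*
 * Illustrative usage from userspace (paths as created by the sysdev
 * registration below; the clocksource names are just examples):
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */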

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);