/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

#include "tick-internal.h"

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

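/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter
 */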
u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);
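
/*
 * Typical usage (illustrative sketch; "my_cc", "my_cc_read" and "my_tc"
 * are made-up names, not part of this file):
 *
 *	static struct cyclecounter my_cc = {
 *		.read	= my_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(48),
 *		.mult	= 1,
 *		.shift	= 0,
 *	};
 *	static struct timecounter my_tc;
 *
 *	timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
 *	...
 *	ns = timecounter_read(&my_tc);
 */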

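/**
 * timecounter_cyc2time - convert a cycle counter time stamp to the same
 *			  time base as values returned by timecounter_read()
 * @tc:		Pointer to time counter
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * The conversion is correct as long as @cycle_tstamp lies within half a
 * counter wrap of tc->cycle_last, which is what the tc->cc->mask / 2
 * check below implements.
 */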
u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @maxsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
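
/*
 * Worked example (illustrative): for a 32768 Hz counter driving a
 * clocksource, from = 32768, to = NSEC_PER_SEC and maxsec = 600 yield
 * mult = 4000000000 and shift = 17, i.e.
 *
 *	ns = (cycles * 4000000000) >> 17 = cycles * 30517.578125
 *
 * which matches the 30517.578125 ns period of a 32768 Hz clock.
 */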

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu, reset_pending;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/* Check the deviation from the watchdog clocksource. */
		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear watchdog_reset_pending when we did a full cycle
	 * through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);
	int select = 0;

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:         Pointer to clocksource
 *
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;
	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
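
/*
 * Example (illustrative): a clocksource with mult = 100000000 gets
 * maxadj = 11000000, i.e. the adjusted mult is kept within
 * [89000000, 111000000].
 */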

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 * @maxadj:	maximum adjustment value to mult (~11%)
 * @mask:	bitmask for two's complement subtraction of non 64 bit counters
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
	 * which is equivalent to the below.
	 * max_cycles < (2^63)/(mult + maxadj)
	 * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
	 * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
	 * max_cycles < 2^(63 - log2(mult + maxadj))
	 * max_cycles < 1 << (63 - log2(mult + maxadj))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	return max_nsecs;
}

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:         Pointer to clocksource
 *
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs;

	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
					  cs->mask);
	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by 12.5%. Please
	 * note a margin of 12.5% is used because this can be computed with
	 * a shift, versus say 10% which would require division.
	 */
	return max_nsecs - (max_nsecs >> 3);
}
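
/*
 * Rough numbers for illustration: a 32bit counter running at ~1GHz
 * (mult = 1 << 24, shift = 24, maxadj ~= 1845493) wraps after ~4.29s;
 * clocks_calc_max_nsecs() yields ~3.8s and the 12.5% margin brings
 * max_idle_ns down to ~3.3s.
 */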

#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	return __clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	return __clocksource_select(true);
}

#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */

static inline void clocksource_select(void) { }
static inline void clocksource_select_fallback(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * __clocksource_updatefreq_scale - Used to update clocksource with new freq
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper functions.
 */
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;
	/*
	 * Calc the maximum number of seconds which we can run before
	 * wrapping around. For clocksources which have a mask > 32bit
	 * we need to limit the max sleep time to have a good
	 * conversion precision. 10 minutes is still a reasonable
	 * amount. That results in a shift value of 24 for a
	 * clocksource with mask >= 40bit and f >= 4GHz. That maps to
	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
	 * margin as we do in clocksource_max_deferment()
	 */
	sec = (cs->mask - (cs->mask >> 3));
	do_div(sec, freq);
	do_div(sec, scale);
	if (!sec)
		sec = 1;
	else if (sec > 600 && cs->mask > UINT_MAX)
		sec = 600;

	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
			       NSEC_PER_SEC / scale, sec * scale);

	/*
	 * For clocksources that have large mults, to avoid overflow.
	 * Since mult may be adjusted by ntp, add a safety extra margin.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult)) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 * @scale:	Scale factor multiplied against freq to get clocksource hz
 * @freq:	clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_updatefreq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
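
/*
 * Typical driver usage (illustrative; "my_clocksource" and the 19.2MHz
 * rate are made-up examples):
 *
 *	clocksource_register_hz(&my_clocksource, 19200000);
 *
 * which reaches this function via the clocksource_register_hz() helper
 * with scale = 1 and freq = 19200000.
 */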


/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	/* calculate max adjustment for given mult/shift */
	cs->maxadj = clocksource_max_adjustment(cs);
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:		clocksource to be changed
 * @rating:	new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	/*
	 * I really can't convince myself to support this on hardware
	 * designed by lobotomized monkeys.
	 */
	if (clocksource_is_watchdog(cs))
		return -EBUSY;

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off \n: */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
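
/*
 * From user space the override looks like this (illustrative output):
 *
 *	# cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	# echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */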

/**
 * sysfs_unbind_current_clocksource - interface for manually unbinding clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	unused
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t sysfs_unbind_clocksource(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @attr:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

/*
 * Sysfs setup bits:
 */
static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static DEVICE_ATTR(unbind_clocksource, 0200, NULL, sysfs_unbind_clocksource);

static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_current_clocksource);
	if (!error)
		error = device_create_file(&device_clocksource,
					   &dev_attr_unbind_clocksource);
	if (!error)
		error = device_create_file(
				&device_clocksource,
				&dev_attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
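
/*
 * Example (illustrative): booting with "clocksource=hpet" on the kernel
 * command line requests the clocksource named "hpet", which is then
 * preferred by clocksource_select() if it is registered and usable.
 */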

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);