/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 * 
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time.
 * - for astronomical applications: add a new function to get
 * non ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
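/*
 * Note: an "xsec" is 1/XSEC_PER_SEC (i.e. 1/2^20) of a second, roughly
 * 0.95 us.  SCALE_XSEC(xsec, max) computes xsec * max / XSEC_PER_SEC,
 * i.e. rescales a fraction of a second from xsec units to units of "max";
 * e.g. SCALE_XSEC(XSEC_PER_SEC / 2, 1000) is about 500 (illustrative).
 */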

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
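
/*
 * Illustrative example: with tb_ticks_per_sec = 512000000 and HZ = 250,
 * __cputime_jiffies_factor ends up as roughly 250 * 2^64 / 512000000, so
 * multiplying a timebase tick count by it and taking the high 64 bits of
 * the product gives the equivalent number of jiffies.
 */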

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == vpa->dtl_idx)
		return 0;
	while (i < vpa->dtl_idx) {
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		dtb = dtl->timebase;
		tb_delta = dtl->enqueue_to_dispatch_time +
			dtl->ready_to_enqueue_time;
		barrier();
		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
			/* buffer has overflowed */
			i = vpa->dtl_idx - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;

	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static u64 vtime_delta(struct task_struct *tsk,
			u64 *sys_scaled, u64 *stolen)
{
	u64 now, nowscaled, deltascaled;
	u64 udelta, delta, user_scaled;

	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	*stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval.  The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
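	/*
	 * Illustrative example: if delta = 300 and udelta = 100 timebase
	 * ticks but deltascaled = 200 SPURR ticks, then sys_scaled below
	 * becomes 200 * 300 / 400 = 150 and user_scaled 50, preserving the
	 * 3:1 system:user ratio seen on the timebase.
	 */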
	*sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			*sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - *sys_scaled;
		} else {
			*sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	return delta;
}

void vtime_account_system(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_system_time(tsk, 0, delta, sys_scaled);
	if (stolen)
		account_steal_time(stolen);
}

void vtime_account_idle(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_idle_time(delta + stolen);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account() has been called recently
 * (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

void vtime_task_switch(struct task_struct *prev)
{
	vtime_account(prev);
	account_process_tick(prev, 0);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
	struct clock_event_device *evt = &__get_cpu_var(decrementers);
	u64 now;

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these
	 */
	if (!cpu_online(smp_processor_id()))
		return;

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

	__get_cpu_var(irq_stat).timer_irqs++;

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
	} else {
		now = *next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest.  We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
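/*
 * For example, with a 512 MHz timebase (illustrative), tb_to_ns_scale and
 * tb_to_ns_shift (set up in time_init() below) encode the ratio
 * 1e9 / 512e6, so each timebase tick advances sched_clock() by roughly
 * 1.953 ns.
 */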
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
		
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
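	/* 18446744073 is 2^64 / 1e9 truncated, so the product is roughly
	 * tv_nsec * 2^64 / 1e9 and its top 32 bits are that 0.32 fraction. */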

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = clock->cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	smp_mb();
	++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
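	/* mult/shift now convert a nanosecond interval into decrementer
	 * ticks: ticks is roughly (ns * mult) >> shift. */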

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
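	/*
	 * E.g. for tb_ticks_per_sec = 512000000 (illustrative), 1e9 / 512e6
	 * is about 1.95, so one right shift leaves a scale just below 1.0
	 * and shift = 1.
	 */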
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
}


#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;
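	/* Schoolbook long division, one 32-bit limb at a time: each step
	 * below produces one 32-bit quotient limb (w, x, y, z) and folds
	 * the remainder into the next, lower limb. */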

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;

}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

module_init(rtc_init);