/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (so never sleep
 * on SMP; nap and doze are OK).
 * 
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * unambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
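
/*
 * Illustrative check (not used by the build): an xsec is 1/2^20 of a
 * second, so converting a fractional second to microseconds scales by
 * 10^6 / 2^20.  For example, half a second is xsec = 512 * 1024, and
 * SCALE_XSEC(512 * 1024, 1000000) == 500000 on both variants.
 */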

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
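
/* Worked example (illustrative, assuming HZ = 250 so SHIFT_HZ = 8, and
   TICKLEN_SCALE = 32): a tick is ~4e6 ns, so last_tick_len ~= 4e6 * 2^32
   ~= 2^54, and TICKLEN_SHIFT = 63 - 30 - 32 + 8 = 9 shifts that up to
   ~2^63, as intended. */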

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
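
/*
 * Illustrative numbers (not from the source): with tb_ticks_per_sec =
 * 512000000 and HZ = 250, div128_by_32(HZ, 0, tb_ticks_per_sec, &res)
 * yields the 0.64 fraction 2^64 * 250 / 512000000, so
 * mulhdu(ticks, __cputime_jiffies_factor) == ticks * 250 / 512000000;
 * one second's worth of ticks (512000000) maps to 250 jiffies.
 */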

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
 	run_posix_cpu_timers(current);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;			/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = get_tb_or_rtc();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	pme->tb = tb;
	pme->purr = purr;
}
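
/*
 * Worked example (illustrative numbers): if the timebase advanced by
 * 1000000 ticks since the last snapshot while the PURR advanced by only
 * 700000, this partition actually ran for 700000 tick-equivalents and
 * the remaining 300000 ticks are accounted as stolen.
 */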

#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb_or_rtc();
	snapshot_purr();
}

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
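
/*
 * Usage sketch: udelay(100) spins for 100 * tb_ticks_per_usec timebase
 * ticks -- roughly 100 microseconds once the timebase frequency has
 * been calibrated (until time_init() runs, tb_ticks_per_usec still
 * holds its "sane default" of 100).
 */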

static __inline__ void timer_check_rtc(void)
{
        /*
         * Update the RTC when needed; this should be performed on the
         * right fraction of a second.  Half or full second?
         * Full second works on mk48t59 clocks; others need testing.
         * Note that this update is basically only used through
         * the adjtimex system calls.  Setting the HW clock in
         * any other way is a /dev/rtc and userland business.
         * This is still wrong by -0.5/+1.5 jiffies because of the
         * timer interrupt resolution and possible delay, but here we
         * hit a quantization limit which can only be solved by
         * higher-resolution timers and by decoupling time management
         * from timer interrupts.  This is also wrong on clocks
         * which require being written at the half-second boundary.
         * We should have an rtc call that only sets the minutes and
         * seconds, like on Intel, to avoid problems with non-UTC clocks.
         */
        if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
        }
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;

	/* Sampling the time base must be done after loading
	 * do_gtod.varp in order to avoid racing with update_gtod.
	 */
	data_barrier(temp_varp);
	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
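
/*
 * Read-side sketch of the protocol described above (illustrative,
 * userspace-style pseudocode, not the actual vdso assembly):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		stamp = vdso_data->tb_orig_stamp;
 *		xsec  = vdso_data->stamp_xsec;
 *		t2x   = vdso_data->tb_to_xs;
 *		smp_rmb();
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 */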

/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number. This is a requirement of our fast 32 bits userland
 * implementation in the vdso. If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fallback to calling
 * the syscall
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/* 
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.  
 */

static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';		
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;
	u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	profile_tick(CPU_PROFILING);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
		if (__USE_RTC() && tb_next_jiffy >= 1000000000)
			tb_next_jiffy -= 1000000000;
		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
			tb_last_jiffy = tb_next_jiffy;
			do_timer(1);
			timer_recalc_offset(tb_last_jiffy);
			timer_check_rtc();
		}
		write_sequnlock(&xtime_lock);
	}
	
	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
		process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 */
	tb_delta = tb_ticks_since(tb_last_jiffy);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

 	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the 
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

        if (ppc_md.time_init != NULL)
                timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
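
	/*
	 * Worked example (illustrative): for tb_ticks_per_sec = 2^29,
	 * 10^9 * 2^64 / 2^29 is about 1.86 * 2^64, so the loop above
	 * shifts once, leaving tb_to_ns_shift == 1 and tb_to_ns_scale
	 * ~= 0.93 * 2^64; mulhdu(tb, scale) << 1 then multiplies the
	 * tick count by ~1.86 ns per tick.
	 */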
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
        }

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
	                        -xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}


#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
#define	days_in_month(a) 	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, and 1996 and 2000 were
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int    i;
	register long   hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
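
/*
 * Illustrative check: to_tm(0, &tm) yields 1970-01-01 00:00:00 with
 * tm_wday == 4 (Thursday, counting 0 as Sunday), i.e. the Unix epoch.
 * Note that, unlike the usual struct rtc_time convention, tm_year here
 * is the full year and tm_mon is 1-based (callers such as
 * timer_check_rtc() adjust both before handing the result to the RTC).
 */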

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is
 * quite nice.  It makes this computation very precise (27-28 bits
 * typically), which is optimistic considering the stability of most
 * processor clock oscillators and the precision with which the
 * timebase frequency is measured, but it does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt=0, tmp, err;
        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
  
        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
        }
  
        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
  
        err = inscale * (mlt+1);
        if (err <= inscale/2)
		mlt++;
        return mlt;
}
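
/*
 * Usage sketch: mulhwu_scale_factor(inscale, outscale) returns
 * approximately 2^32 * outscale / inscale, so after time_init() sets
 * tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000),
 * mulhwu(tb_delta, tb_to_us) converts a 32-bit timebase delta to
 * microseconds without a divide.
 */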

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;

}
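
/*
 * Illustrative check: div128_by_32(1, 0, 16, &dr) divides 2^64 by 16,
 * giving dr->result_high == 0 and dr->result_low == 1ULL << 60.
 */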