/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource	*clock;
	/* NTP adjusted clock multiplier */
	u32			mult;
	/* The shift value of the current clocksource. */
	u32			shift;
	/* Number of clock cycles in one NTP interval. */
	cycle_t			cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64			xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64			xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32			raw_interval;

	/* Current CLOCK_REALTIME time in seconds */
	u64			xtime_sec;
	/* Clock shifted nano seconds */
	u64			xtime_nsec;

	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64			ntp_error;
	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	u32			ntp_error_shift;

	/*
	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
	 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
	 * at zero at system boot time, so wall_to_monotonic will be negative,
	 * however, we will ALWAYS keep the tv_nsec part positive so we can use
	 * the usual normalization.
	 *
	 * wall_to_monotonic is moved after resume from suspend for the
	 * monotonic time not to jump. We need to add total_sleep_time to
	 * wall_to_monotonic to get the real boot based time offset.
	 *
	 * - wall_to_monotonic is no longer the boot time, getboottime must be
	 * used instead.
	 */
	struct timespec		wall_to_monotonic;
	/* time spent in suspend */
	struct timespec		total_sleep_time;
	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
	struct timespec		raw_time;
	/* Offset clock monotonic -> clock realtime */
	ktime_t			offs_real;
	/* Offset clock monotonic -> clock boottime */
	ktime_t			offs_boot;
	/* Seqlock for all timekeeper values */
	seqlock_t		lock;
};

static struct timekeeper timekeeper;

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

static struct timespec tk_xtime(struct timekeeper *tk)
{
	struct timespec ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += ts->tv_nsec << tk->shift;
}
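
/*
 * Illustrative note (not from the original source): xtime_nsec stores
 * nanoseconds left-shifted by the clocksource shift, so sub-nanosecond
 * precision survives repeated accumulation. Assuming a hypothetical
 * shift of 10:
 *
 *	tv_nsec = 500000 ns  ->  xtime_nsec = 500000 << 10 = 512000000
 *
 * tk_xtime() undoes this with xtime_nsec >> shift, recovering 500000 ns.
 */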

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		Pointer to the timekeeper to set up
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
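
/*
 * Worked example (illustrative only; the clocksource numbers are
 * hypothetical): a 10 MHz clocksource with mult = 102400 and shift = 10
 * converts cycles to ns as (cycles * 102400) >> 10 = cycles * 100.
 * With HZ = 100, NTP_INTERVAL_LENGTH = 10000000 ns, so:
 *
 *	tmp             = 10000000 << 10 = 10240000000
 *	cycle_interval  = (10240000000 + 102400/2) / 102400 = 100000 cycles
 *	xtime_interval  = 100000 * 102400 = 10240000000 shifted ns
 *	xtime_remainder = 10240000000 - 10240000000 = 0
 *	raw_interval    = 10240000000 >> 10 = 10000000 ns
 *
 * i.e. one NTP interval is exactly 100000 cycles = 10 ms for this clock.
 */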

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
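
/*
 * Illustrative arithmetic (hypothetical clocksource, mult = 102400,
 * shift = 10, i.e. 100 ns per cycle): with xtime_nsec = 0 and a delta
 * of 12345 cycles since the last update_wall_time():
 *
 *	nsec = (12345 * 102400 + 0) >> 10 = 1234500 ns
 *
 * which is the expected 12345 cycles * 100 ns/cycle.
 */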

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}

static void update_rt_offset(struct timekeeper *tk)
{
	struct timespec tmp, *wtm = &tk->wall_to_monotonic;

	set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
}
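
/*
 * Illustrative note (not from the original source): offs_real is simply
 * the negated wall_to_monotonic, e.g. wall_to_monotonic = -1000 s gives
 * offs_real = +1000 s, so realtime = monotonic + offs_real.
 */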

/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
	struct timespec xt;

	if (clearntp) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_rt_offset(tk);
	xt = tk_xtime(tk);
	update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}


/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in gettimeoffset() */
	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs = 0;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ts->tv_sec = timekeeper.xtime_sec;
		ts->tv_nsec = timekeeping_get_ns(&timekeeper);

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
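
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that needs a consistent wall-clock timestamp would do:
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("now: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * The seqlock retry loop above guarantees that tv_sec and tv_nsec come
 * from the same update, even if update_wall_time() runs concurrently.
 */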

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);
		secs = timekeeper.xtime_sec +
				timekeeper.wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(&timekeeper) +
				timekeeper.wall_to_monotonic.tv_nsec;

	} while (read_seqretry(&timekeeper.lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);
		ts->tv_sec = timekeeper.xtime_sec;
		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
		tomono = timekeeper.wall_to_monotonic;

	} while (read_seqretry(&timekeeper.lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);

		*ts_raw = timekeeper.raw_time;
		ts_real->tv_sec = timekeeper.xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
		nsecs_real = timekeeping_get_ns(&timekeeper);

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta, xt;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now(&timekeeper);

	xt = tk_xtime(&timekeeper);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	timekeeper.wall_to_monotonic =
			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);

	tk_set_xtime(&timekeeper, tv);

	timekeeping_update(&timekeeper, true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
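
/*
 * Illustrative invariant (not from the original source): do_settimeofday()
 * keeps CLOCK_MONOTONIC (xtime + wall_to_monotonic) unchanged. E.g. if the
 * wall clock is stepped forward by 100 s, ts_delta.tv_sec = 100 and
 * wall_to_monotonic.tv_sec is reduced by 100, so the sum is constant and
 * monotonic time does not jump.
 */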

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now(&timekeeper);

	tk_xtime_add(&timekeeper, ts);
	timekeeper.wall_to_monotonic =
				timespec_sub(timekeeper.wall_to_monotonic, *ts);

	timekeeping_update(&timekeeper, true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now(&timekeeper);
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		tk_setup_internals(&timekeeper, new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(&timekeeper, true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&timekeeper.lock);
		nsecs = timekeeping_get_ns_raw(&timekeeper);
		*ts = timekeeper.raw_time;

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&timekeeper.lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ret = timekeeper.clock->max_idle_ns;

	} while (read_seqretry(&timekeeper.lock, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	seqlock_init(&timekeeper.lock);

	ntp_init();

	write_seqlock_irqsave(&timekeeper.lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(&timekeeper, clock);

	tk_set_xtime(&timekeeper, &now);
	timekeeper.raw_time.tv_sec = 0;
	timekeeper.raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(&timekeeper);

	set_normalized_timespec(&timekeeper.wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	update_rt_offset(&timekeeper);
	timekeeper.total_sleep_time.tv_sec = 0;
	timekeeper.total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&timekeeper.lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

static void update_sleep_time(struct timespec t)
{
	timekeeper.total_sleep_time = t;
	timekeeper.offs_boot = timespec_to_ktime(t);
}

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}

	tk_xtime_add(tk, delta);
	tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
	update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
}


/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now(&timekeeper);

	__timekeeping_inject_sleeptime(&timekeeper, delta);

	timekeeping_update(&timekeeper, true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}


/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&timekeeper.lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(&timekeeper, &ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&timekeeper.lock, flags);
	timekeeping_forward_now(&timekeeper);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value.  */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision timekeeper.xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval,
							&offset);
	} else if (error < -interval) {
		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(tk, error, &interval,
							&offset);
	} else
		return;

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}

}


/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		tk->xtime_sec += leap;
		tk->wall_to_monotonic.tv_sec -= leap;
		if (leap)
			clock_was_set_delayed();

	}
}
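
/*
 * Illustrative note (not from the original source): if second_overflow()
 * reports a leap second (leap = +1 or -1), xtime_sec is stepped by that
 * amount and wall_to_monotonic.tv_sec is stepped the opposite way, so
 * CLOCK_MONOTONIC (xtime + wall_to_monotonic) stays smooth while
 * CLOCK_REALTIME absorbs the leap.
 */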


/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles
 * into a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < tk->cycle_interval << shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
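
/*
 * Illustrative example (not from the original source): a single call with
 * shift = 2 consumes one chunk of four cycle_intervals:
 *
 *	offset     -= cycle_interval << 2
 *	cycle_last += cycle_interval << 2
 *	xtime_nsec += xtime_interval << 2
 *
 * so four ticks worth of time are accumulated with one pass through the
 * function instead of four.
 */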

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned long flags;
	s64 remainder;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(&timekeeper, offset, shift);
		if (offset < timekeeper.cycle_interval << shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(&timekeeper, offset);

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), this can be killed.
	 */
	remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
	timekeeper.xtime_nsec -= remainder;
	timekeeper.xtime_nsec += 1 << timekeeper.shift;
	timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	accumulate_nsecs_to_secs(&timekeeper);

	timekeeping_update(&timekeeper, false);

out:
	write_sequnlock_irqrestore(&timekeeper.lock, flags);
}
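
/*
 * Illustrative walk through the accumulation loop above (not from the
 * original source): suppose 70 cycle_intervals are pending. ilog2(70) = 6,
 * so the first call consumes 64 intervals (shift = 6); the leftover 6 is
 * smaller than 1 << 6, so shift decays until 4 intervals (shift = 2) and
 * then 2 intervals (shift = 1) are consumed. 70 = 64 + 4 + 2 is thus
 * accumulated in three steps rather than seventy, which is the point of
 * the logarithmic accumulation.
 */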

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = timekeeper.wall_to_monotonic.tv_sec +
				timekeeper.total_sleep_time.tv_sec,
		.tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
				timekeeper.total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
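
/*
 * Worked example (illustrative only): suppose the system booted when the
 * wall clock read 1000 s, so wall_to_monotonic.tv_sec = -1000. After a
 * 50 s suspend, __timekeeping_inject_sleeptime() makes
 * wall_to_monotonic.tv_sec = -1050 and total_sleep_time.tv_sec = 50, and
 * getboottime() returns -(-1050 + 50) = 1000 s: the boot timestamp is
 * unaffected by the time spent in suspend.
 */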


/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timespec tomono, sleep;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);
		ts->tv_sec = timekeeper.xtime_sec;
		ts->tv_nsec = timekeeping_get_ns(&timekeeper);
		tomono = timekeeper.wall_to_monotonic;
		sleep = timekeeper.total_sleep_time;

	} while (read_seqretry(&timekeeper.lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return timekeeper.xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return tk_xtime(&timekeeper);
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		now = tk_xtime(&timekeeper);
	} while (read_seqretry(&timekeeper.lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		now = tk_xtime(&timekeeper);
		mono = timekeeper.wall_to_monotonic;
	} while (read_seqretry(&timekeeper.lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&timekeeper.lock);
		*xtim = tk_xtime(&timekeeper);
		*wtom = timekeeper.wall_to_monotonic;
		*sleep = timekeeper.total_sleep_time;
	} while (read_seqretry(&timekeeper.lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		secs = timekeeper.xtime_sec;
		nsecs = timekeeping_get_ns(&timekeeper);

		*offs_real = timekeeper.offs_real;
		*offs_boot = timekeeper.offs_boot;
	} while (read_seqretry(&timekeeper.lock, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&timekeeper.lock);
		wtom = timekeeper.wall_to_monotonic;
	} while (read_seqretry(&timekeeper.lock, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);


/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}