/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>


static struct timekeeper timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}
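
/*
 * Note on the representation used above: xtime_nsec stores "shifted
 * nanoseconds", i.e. nanoseconds scaled up by 2^shift, so the sub-ns
 * precision of the clocksource's mult/shift conversion is not thrown
 * away between updates.  A tiny worked example with made-up numbers:
 * with tk->shift = 8, tv_nsec = 3 is stored as xtime_nsec = 3 << 8 =
 * 768, and plain nanoseconds are only recovered at read time via
 * xtime_nsec >> 8.
 */
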
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time	= t;
	tk->offs_boot		= timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to set up.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
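
/*
 * Worked example for the interval math above (illustrative numbers, not
 * from a real clocksource): take a 1MHz counter registered with
 * shift = 20 and mult = 1000 << 20 (one cycle == 1000ns), and
 * NTP_INTERVAL_LENGTH = 10000000ns (HZ=100).  Then:
 *
 *	tmp = 10000000 << 20;		(ns -> shifted ns)
 *	tmp /= (1000 << 20);		(-> 10000 cycles)
 *
 * so cycle_interval = 10000 cycles and xtime_interval =
 * 10000 * (1000 << 20) shifted nanoseconds, which is exactly the
 * requested 10ms NTP interval once shifted back down by 20 bits.
 */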

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
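
/*
 * The two helpers above differ only in which multiplier they apply;
 * clocksource_cyc2ns() is simply ((u64)delta * mult) >> shift.  Stated
 * side by side (ignoring the optional arch_gettimeoffset() term):
 *
 *	monotonic ns = (delta * tk->mult + tk->xtime_nsec) >> tk->shift
 *	raw ns       = (delta * clock->mult) >> clock->shift
 *
 * tk->mult starts out equal to clock->mult but is continually nudged by
 * timekeeping_adjust() to track NTP; the raw clock never is.
 */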

/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
	if (clearntp) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in gettimeoffset() */
	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
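
/*
 * Usage sketch (an illustrative caller, not part of this file):
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * The read_seqbegin/read_seqretry loop above means readers take no lock
 * at all and simply retry if a write-side update raced with the read.
 */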

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

	} while (read_seqretry(&tk->lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
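
/*
 * Relationship worth spelling out (it just restates the code above):
 *
 *	CLOCK_MONOTONIC = CLOCK_REALTIME + wall_to_monotonic
 *
 * do_settimeofday() below moves wall_to_monotonic by the opposite of any
 * wall-clock step, which is why CLOCK_MONOTONIC never jumps.
 */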

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, tv);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
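
/*
 * All the setters in this file follow the write-side pattern visible
 * above; a sketch of the shape any new setter should take:
 *
 *	write_seqlock_irqsave(&tk->lock, flags);
 *	timekeeping_forward_now(tk);	<- accumulate up to "now" first
 *	... modify xtime / wall_to_monotonic ...
 *	timekeeping_update(tk, true);	<- refresh vsyscall, clear NTP state
 *	write_sequnlock_irqrestore(&tk->lock, flags);
 *	clock_was_set();		<- kick hrtimers outside the lock
 */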

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);
	if (!new->enable || new->enable(new) == 0) {
		old = tk->clock;
		tk_setup_internals(tk, new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}
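
/*
 * Design note: the actual switch runs under stop_machine() so no CPU can
 * be mid-read of the old clocksource while cycle_last/mult/shift are
 * rewritten.  Sketch of how a driver ends up here (the hardware and
 * rates are made up; clocksource_register_hz() is the real entry point):
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my_timer",
 *		.rating	= 300,
 *		.read	= my_timer_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&my_cs, 24000000);
 *
 * If my_cs outrates the current clock, clocksource.c eventually calls
 * timekeeping_notify() above to swap it in.
 */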
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&tk->lock);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&tk->lock);

		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&tk->lock);

		ret = tk->clock->max_idle_ns;

	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);
	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	}

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	seqlock_init(&tk->lock);

	ntp_init();

	write_seqlock_irqsave(&tk->lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	write_sequnlock_irqrestore(&tk->lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
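
/*
 * Sketch of the expected caller (rtc_resume() is the real one; this is
 * an illustrative reconstruction, not its actual code): the RTC core
 * measures how long the box slept and feeds the difference in here:
 *
 *	struct timespec delta;
 *
 *	delta = timespec_sub(rtc_time_at_resume, rtc_time_at_suspend);
 *	timekeeping_inject_sleeptime(&delta);
 */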

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clockevents_resume();
	clocksource_resume();

	write_seqlock_irqsave(&tk->lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(tk, &ts);
	}
	/* re-base the last cycle value */
	tk->clock->cycle_last = tk->clock->read(tk->clock);
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, false);
	write_sequnlock_irqrestore(&tk->lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&tk->lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* syscore resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value.  */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision tk->xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}

}
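
/*
 * Tiny numeric check of the invariant derived above (made-up numbers):
 * with offset = 6 unaccumulated cycles, mult = 10 and xtime_nsec = 100,
 * the current reading is 6 * 10 + 100 = 160.  After adj = +1 we get
 * 6 * 11 + (100 - 6) = 160: the same reading, with the clock running
 * faster from here on instead of stepping at the adjustment point.
 */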

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			clock_was_set_delayed();
		}
	}
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for an O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < tk->cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
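
/*
 * Worked example of the O(log) behaviour (illustrative numbers): if a
 * long NO_HZ idle leaves offset = 1000 cycle_intervals pending,
 * update_wall_time() below starts at shift = ilog2(1000) = 9 and this
 * function consumes chunks of 512, 256, 128, 64, 32 and finally 8
 * intervals: a handful of passes instead of 1000 tick-sized ones.
 */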

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;

}
#else
#define old_vsyscall_fixup(tk)
#endif



/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *tk = &timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned long flags;

	write_seqlock_irqsave(&tk->lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	accumulate_nsecs_to_secs(tk);

	timekeeping_update(tk, false);

out:
	write_sequnlock_irqrestore(&tk->lock, flags);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec boottime = {
		.tv_sec = tk->wall_to_monotonic.tv_sec +
				tk->total_sleep_time.tv_sec,
		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
				tk->total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono, sleep;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
		sleep = tk->total_sleep_time;

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;

	*ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);

		now = tk_xtime(tk);
	} while (read_seqretry(&tk->lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);
		*xtim = tk_xtime(tk);
		*wtom = tk->wall_to_monotonic;
		*sleep = tk->total_sleep_time;
	} while (read_seqretry(&tk->lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqbegin(&tk->lock);

		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
	} while (read_seqretry(&tk->lock, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&tk->lock);
		wtom = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}