/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

11
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
17
#include <linux/sched.h>
18
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
23
#include <linux/stop_machine.h>
24
#include <linux/pvclock_gtod.h>
25
#include <linux/compiler.h>
26

27
#include "tick-internal.h"
28
#include "ntp_internal.h"
29
#include "timekeeping_internal.h"
30

31 32
#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
33
#define TK_CLOCK_WAS_SET	(1 << 2)
34

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

44
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
45
static struct timekeeper shadow_timekeeper;
46

47 48 49
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

50 51 52
/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

53 54
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
55 56
	while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
		tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
66
	ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
67 68 69
	return ts;
}

70
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
71 72
{
	tk->xtime_sec = ts->tv_sec;
73
	tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
74 75
}

76
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
77 78
{
	tk->xtime_sec += ts->tv_sec;
79
	tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
80
	tk_normalize_xtime(tk);
81
}
82

83
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
84
{
85
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
91
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
92
					-tk->wall_to_monotonic.tv_nsec);
93
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
94
	tk->wall_to_monotonic = wtm;
95 96
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
97
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
98 99
}

100
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
101
{
102
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
103 104
}

105
/**
106
 * tk_setup_internals - Set up internals to use clocksource clock.
107
 *
108
 * @tk:		The target timekeeper to setup.
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
116
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
117 118
{
	cycle_t interval;
119
	u64 tmp, ntpinterval;
120
	struct clocksource *old_clock;
121

	old_clock = tk->tkr.clock;
	tk->tkr.clock = clock;
	tk->tkr.read = clock->read;
	tk->tkr.mask = clock->mask;
	tk->tkr.cycle_last = tk->tkr.read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
131
	ntpinterval = tmp;
132 133
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
138
	tk->cycle_interval = interval;
139 140

	/* Go back from cycles -> shifted ns */
141 142 143
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
144
		((u64) interval * clock->mult) >> clock->shift;
145

	 /* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
150
			tk->tkr.xtime_nsec >>= -shift_change;
151
		else
152
			tk->tkr.xtime_nsec <<= shift_change;
153
	}
154
	tk->tkr.shift = clock->shift;
155

156 157
	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
164
	tk->tkr.mult = clock->mult;
165
}
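
/*
 * Illustrative example (numbers assumed, not taken from this file): with
 * HZ=100 the NTP interval is 10,000,000 ns, and for a hypothetical
 * clocksource with mult = 2^24 and shift = 24 (exactly 1 ns per cycle)
 * the setup above works out to:
 *
 *	ntpinterval	= 10000000 << 24	(shifted ns per tick)
 *	cycle_interval	= 10000000		(cycles per tick)
 *	xtime_interval	= 10000000 << 24	(so xtime_remainder = 0)
 *	raw_interval	= 10000000		(plain ns per tick)
 *
 * Real clocksources rarely divide this evenly; the rounding above and the
 * xtime_remainder term absorb the difference.
 */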

167
/* Timekeeper helper functions. */
168 169

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
170 171
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
172
#else
173
static inline u32 arch_gettimeoffset(void) { return 0; }
174 175
#endif

176
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
177
{
178
	cycle_t cycle_now, delta;
179
	s64 nsec;
180 181

	/* read clocksource: */
182
	cycle_now = tk->tkr.read(tk->tkr.clock);
183 184

	/* calculate the delta since the last update_wall_time: */
185
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
186

187 188
	nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec;
	nsec >>= tk->tkr.shift;
189

190
	/* If arch requires, add in arch_gettimeoffset() */
191
	return nsec + arch_gettimeoffset();
192 193
}
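
/*
 * Reading aid (illustrative values, not from this file): the conversion
 * above is nsec = (delta * mult + xtime_nsec) >> shift.  For example,
 * with mult = 2^24, shift = 24, xtime_nsec = 3 << 24 and delta = 5000
 * cycles, the result is 5003 ns (plus any arch offset).
 */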

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
195
{
196
	struct clocksource *clock = tk->tkr.clock;
197
	cycle_t cycle_now, delta;
198
	s64 nsec;
199 200

	/* read clocksource: */
201
	cycle_now = tk->tkr.read(clock);
202 203

	/* calculate the delta since the last update_wall_time: */
204
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
205

206
	/* convert delta to nanoseconds. */
207
	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
208

209
	/* If arch requires, add in arch_gettimeoffset() */
210
	return nsec + arch_gettimeoffset();
211 212
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt;

	xt = tk_xtime(tk);
220 221
	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
			    tk->tkr.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
238 239 240
	remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
	tk->tkr.xtime_nsec -= remainder;
	tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
241
	tk->ntp_error += remainder << tk->ntp_error_shift;
242
	tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

248 249
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

250
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
251
{
252
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
260
	struct timekeeper *tk = &tk_core.timekeeper;
261 262 263
	unsigned long flags;
	int ret;

264
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
265
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
266
	update_pvclock_gtod(tk, true);
267
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

282
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
283
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
284
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	s64 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec *= NSEC_PER_SEC;
	nsec += tk->wall_to_monotonic.tv_nsec;
307
	tk->tkr.base_mono = ns_to_ktime(nsec);
308 309 310

	/* Update the monotonic raw base */
	tk->base_raw = timespec64_to_ktime(tk->raw_time);
311 312
}

313
/* must hold timekeeper_lock */
314
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
315
{
316
	if (action & TK_CLEAR_NTP) {
317
		tk->ntp_error = 0;
318 319
		ntp_clear();
	}
320
	update_vsyscall(tk);
321
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
322

323 324
	tk_update_ktime_data(tk);

325
	if (action & TK_MIRROR)
326 327
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
328 329
}

330
/**
331
 * timekeeping_forward_now - update clock to the current time
332
 *
333 334 335
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
336
 */
337
static void timekeeping_forward_now(struct timekeeper *tk)
338
{
339
	struct clocksource *clock = tk->tkr.clock;
340
	cycle_t cycle_now, delta;
341
	s64 nsec;
342

343 344 345
	cycle_now = tk->tkr.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
	tk->tkr.cycle_last = cycle_now;
346

347
	tk->tkr.xtime_nsec += delta * tk->tkr.mult;
348

349
	/* If arch requires, add in arch_gettimeoffset() */
350
	tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
351

352
	tk_normalize_xtime(tk);
353

354
	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
355
	timespec64_add_ns(&tk->raw_time, nsec);
356 357 358
}

/**
359
 * __getnstimeofday64 - Returns the time of day in a timespec64.
360 361
 * @ts:		pointer to the timespec to be set
 *
362 363
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -EAGAIN when timekeeping is suspended (the
 * timespec is still filled in for callers that want to use it anyway).
364
 */
365
int __getnstimeofday64(struct timespec64 *ts)
366
{
367
	struct timekeeper *tk = &tk_core.timekeeper;
368
	unsigned long seq;
369
	s64 nsecs = 0;
370 371

	do {
372
		seq = read_seqcount_begin(&tk_core.seq);
373

374
		ts->tv_sec = tk->xtime_sec;
375
		nsecs = timekeeping_get_ns(tk);
376

377
	} while (read_seqcount_retry(&tk_core.seq, seq));
378

379
	ts->tv_nsec = 0;
380
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
390
EXPORT_SYMBOL(__getnstimeofday64);
391 392

/**
393
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
398
void getnstimeofday64(struct timespec64 *ts)
399
{
400
	WARN_ON(__getnstimeofday64(ts));
401
}
402
EXPORT_SYMBOL(getnstimeofday64);
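
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 *
 *	struct timespec64 ts;
 *
 *	if (__getnstimeofday64(&ts))
 *		pr_debug("timekeeping suspended, value may be stale\n");
 *
 * getnstimeofday64() is the variant that WARNs instead of returning an
 * error when timekeeping is suspended.
 */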

404 405
ktime_t ktime_get(void)
{
406
	struct timekeeper *tk = &tk_core.timekeeper;
407
	unsigned int seq;
408 409
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
414
		seq = read_seqcount_begin(&tk_core.seq);
415
		base = tk->tkr.base_mono;
416
		nsecs = timekeeping_get_ns(tk);
417

418
	} while (read_seqcount_retry(&tk_core.seq, seq));
419

420
	return ktime_add_ns(base, nsecs);
421 422 423
}
EXPORT_SYMBOL_GPL(ktime_get);
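
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * measuring an elapsed interval with the monotonic clock.  do_work() and
 * elapsed_ns are placeholders.
 *
 *	ktime_t start = ktime_get();
 *	do_work();
 *	s64 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 */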

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
441
		base = ktime_add(tk->tkr.base_mono, *offset);
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
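
/*
 * Illustrative caller sketch (hypothetical): translating one monotonic
 * timestamp into the other time bases kept by the timekeeper.
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);
 *	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 */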

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->base_raw;
		nsecs = timekeeping_get_ns_raw(tk);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

492
/**
493
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
500
void ktime_get_ts64(struct timespec64 *ts)
501
{
502
	struct timekeeper *tk = &tk_core.timekeeper;
503
	struct timespec64 tomono;
504
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
510
		seq = read_seqcount_begin(&tk_core.seq);
511
		ts->tv_sec = tk->xtime_sec;
512
		nsec = timekeeping_get_ns(tk);
513
		tomono = tk->wall_to_monotonic;
514

515
	} while (read_seqcount_retry(&tk_core.seq, seq));
516

517 518 519
	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
520
}
521
EXPORT_SYMBOL_GPL(ktime_get_ts64);
522

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
536
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
543
		seq = read_seqcount_begin(&tk_core.seq);
544

545
		*ts_raw = timespec64_to_timespec(tk->raw_time);
546
		ts_real->tv_sec = tk->xtime_sec;
547
		ts_real->tv_nsec = 0;
548

549 550
		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);
551

552
	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
565
 * NOTE: Users should be converted to using getnstimeofday()
566 567 568
 */
void do_gettimeofday(struct timeval *tv)
{
569
	struct timespec64 now;
570

571
	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
576

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
583
int do_settimeofday(const struct timespec *tv)
584
{
585
	struct timekeeper *tk = &tk_core.timekeeper;
586
	struct timespec64 ts_delta, xt, tmp;
587
	unsigned long flags;
588

589
	if (!timespec_valid_strict(tv))
590 591
		return -EINVAL;

592
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
593
	write_seqcount_begin(&tk_core.seq);
594

595
	timekeeping_forward_now(tk);
596

597
	xt = tk_xtime(tk);
598 599 600
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

601
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
602

603 604
	tmp = timespec_to_timespec64(*tv);
	tk_set_xtime(tk, &tmp);
605

606
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
607

608
	write_seqcount_end(&tk_core.seq);
609
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
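
/*
 * Illustrative caller sketch (hypothetical values, not part of this
 * file): stepping the wall clock to an absolute time.  The timespec must
 * pass timespec_valid_strict() or -EINVAL is returned.
 *
 *	struct timespec ts = { .tv_sec = 1400000000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday(&ts))
 *		pr_warn("time rejected\n");
 */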

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @tv:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
626
	struct timekeeper *tk = &tk_core.timekeeper;
627
	unsigned long flags;
628
	struct timespec64 ts64, tmp;
629
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

634 635
	ts64 = timespec_to_timespec64(*ts);

636
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
637
	write_seqcount_begin(&tk_core.seq);
638

639
	timekeeping_forward_now(tk);
640

641
	/* Make sure the proposed value is valid */
642 643
	tmp = timespec64_add(tk_xtime(tk),  ts64);
	if (!timespec64_valid_strict(&tmp)) {
644 645 646
		ret = -EINVAL;
		goto error;
	}
647

648 649
	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
650

651
error: /* even if we error out, we forwarded the time, so call update */
652
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
653

654
	write_seqcount_end(&tk_core.seq);
655
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

660
	return ret;
661 662 663
}
EXPORT_SYMBOL(timekeeping_inject_offset);
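
/*
 * Illustrative caller sketch (hypothetical): nudging the clock forward
 * by 500 ms, as do_adjtimex() below does for ADJ_SETOFFSET.
 *
 *	struct timespec off = { .tv_sec = 0, .tv_nsec = 500 * NSEC_PER_MSEC };
 *
 *	if (timekeeping_inject_offset(&off))
 *		pr_warn("offset rejected\n");
 */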


/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
671
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
676
		seq = read_seqcount_begin(&tk_core.seq);
677
		ret = tk->tai_offset;
678
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
687
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
688 689
{
	tk->tai_offset = tai_offset;
690
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
699
	struct timekeeper *tk = &tk_core.timekeeper;
700 701
	unsigned long flags;

702
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
703
	write_seqcount_begin(&tk_core.seq);
704
	__timekeeping_set_tai_offset(tk, tai_offset);
705
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
706
	write_seqcount_end(&tk_core.seq);
707
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
708
	clock_was_set();
709 710
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
716
static int change_clocksource(void *data)
717
{
718
	struct timekeeper *tk = &tk_core.timekeeper;
719
	struct clocksource *new, *old;
720
	unsigned long flags;
721

722
	new = (struct clocksource *) data;
723

724
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
725
	write_seqcount_begin(&tk_core.seq);
726

727
	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
734
			old = tk->tkr.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
742
	}
743
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
744

745
	write_seqcount_end(&tk_core.seq);
746
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
747

748 749
	return 0;
}
750

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
758
int timekeeping_notify(struct clocksource *clock)
759
{
760
	struct timekeeper *tk = &tk_core.timekeeper;
761

762
	if (tk->tkr.clock == clock)
763
		return 0;
764
	stop_machine(change_clocksource, clock, NULL);
765
	tick_clock_notify();
766
	return tk->tkr.clock == clock ? 0 : -1;
767
}
768

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
777
	struct timekeeper *tk = &tk_core.timekeeper;
778
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
783
		seq = read_seqcount_begin(&tk_core.seq);
784
		nsecs = timekeeping_get_ns_raw(tk);
785
		ts64 = tk->raw_time;
786

787
	} while (read_seqcount_retry(&tk_core.seq, seq));
788

789 790
	timespec64_add_ns(&ts64, nsecs);
	*ts = timespec64_to_timespec(ts64);
791 792 793
}
EXPORT_SYMBOL(getrawmonotonic);

794
/**
795
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
796
 */
797
int timekeeping_valid_for_hres(void)
798
{
799
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
804
		seq = read_seqcount_begin(&tk_core.seq);
805

806
		ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
807

808
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
818
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;
821

	do {
823
		seq = read_seqcount_begin(&tk_core.seq);

825
		ret = tk->tkr.clock->max_idle_ns;

827
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
830 831
}

832
/**
833
 * read_persistent_clock -  Return time from the persistent clock.
834 835
 *
 * Weak dummy function for arches that do not yet support it.
836 837
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
838 839 840
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
841
void __weak read_persistent_clock(struct timespec *ts)
842
{
843 844
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
845 846
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
856
void __weak read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
867
	struct timekeeper *tk = &tk_core.timekeeper;
868
	struct clocksource *clock;
869
	unsigned long flags;
870 871
	struct timespec64 now, boot, tmp;
	struct timespec ts;
872

873 874 875
	read_persistent_clock(&ts);
	now = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
880 881
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;
882

883 884 885
	read_boot_clock(&ts);
	boot = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}
891

892
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
893
	write_seqcount_begin(&tk_core.seq);
894 895
	ntp_init();

896
	clock = clocksource_default_clock();
897 898
	if (clock->enable)
		clock->enable(clock);
899
	tk_setup_internals(tk, clock);
900

901 902 903
	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
904
	tk->base_raw.tv64 = 0;
905
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
906
		boot = tk_xtime(tk);
907

908
	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
909
	tk_set_wall_to_mono(tk, tmp);
910

911
	timekeeping_update(tk, TK_MIRROR);
912

913
	write_seqcount_end(&tk_core.seq);
914
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
915 916 917
}

/* time in seconds when suspend began */
918
static struct timespec64 timekeeping_suspend_time;
919

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
927
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
928
					   struct timespec64 *delta)
929
{
930
	if (!timespec64_valid_strict(delta)) {
931 932 933
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
934 935
		return;
	}
936
	tk_xtime_add(tk, delta);
937
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
938
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
939
	tk_debug_account_sleep_time(delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
954
	struct timekeeper *tk = &tk_core.timekeeper;
955
	struct timespec64 tmp;
956
	unsigned long flags;
957

	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
963 964
		return;

965
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
966
	write_seqcount_begin(&tk_core.seq);

968
	timekeeping_forward_now(tk);
969

970 971
	tmp = timespec_to_timespec64(*delta);
	__timekeeping_inject_sleeptime(tk, &tmp);
972

973
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
974

975
	write_seqcount_end(&tk_core.seq);
976
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
989
static void timekeeping_resume(void)
990
{
991
	struct timekeeper *tk = &tk_core.timekeeper;
992
	struct clocksource *clock = tk->tkr.clock;
993
	unsigned long flags;
994 995
	struct timespec64 ts_new, ts_delta;
	struct timespec tmp;
996 997
	cycle_t cycle_now, cycle_delta;
	bool suspendtime_found = false;
998

999 1000
	read_persistent_clock(&tmp);
	ts_new = timespec_to_timespec64(tmp);
1001

1002
	clockevents_resume();
1003 1004
	clocksource_resume();

1005
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1006
	write_seqcount_begin(&tk_core.seq);
1007

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
1020
	cycle_now = tk->tkr.read(clock);
1021
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1022
		cycle_now > tk->tkr.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

1028 1029
		cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
						tk->tkr.mask);

		/*
		 * "cycle_delta * mutl" may cause 64 bits overflow, if the
		 * suspended time is too long. In that case we need do the
		 * 64 bits math carefully
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

1044
		ts_delta = ns_to_timespec64(nsec);
1045
		suspendtime_found = true;
1046 1047
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1048
		suspendtime_found = true;
1049
	}

	if (suspendtime_found)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
1055
	tk->tkr.cycle_last = cycle_now;
1056
	tk->ntp_error = 0;
1057
	timekeeping_suspended = 0;
1058
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1059
	write_seqcount_end(&tk_core.seq);
1060
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
1067
	hrtimers_resume();
1068 1069
}

1070
static int timekeeping_suspend(void)
1071
{
1072
	struct timekeeper *tk = &tk_core.timekeeper;
1073
	unsigned long flags;
1074 1075 1076
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;
	struct timespec tmp;
1077

1078 1079
	read_persistent_clock(&tmp);
	timekeeping_suspend_time = timespec_to_timespec64(tmp);
1080

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exist = true;

1089
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1090
	write_seqcount_begin(&tk_core.seq);
1091
	timekeeping_forward_now(tk);
1092
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
1100 1101
	delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec64_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec)  >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise adjust the recorded suspend time to compensate */
		timekeeping_suspend_time =
1111
			timespec64_add(timekeeping_suspend_time, delta_delta);
1112
	}
1113 1114

	timekeeping_update(tk, TK_MIRROR);
1115
	write_seqcount_end(&tk_core.seq);
1116
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1117 1118

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
1120
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
1126
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

1131
static int __init timekeeping_init_ops(void)
1132
{
1133 1134
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
1135 1136
}

1137
device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
1143 1144
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
1158 1159
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
1160
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
1169 1170
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value.  */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
1195
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1196
{
1197
	s64 error, interval = tk->cycle_interval;
1198 1199
	int adj;

1200
	/*
1201
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
1207 1208
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
1209
	 * larger than half an interval.
1210
	 *
1211
	 * Note: It does not "save" on aggravation when reading the code.
1212
	 */
1213
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
1214
	if (error > interval) {
1215 1216
		/*
		 * We now divide error by 4(via shift), which checks if
1217
		 * the error is greater than twice the interval.
1218 1219 1220
		 * If it is greater, we need a bigadjust, if its smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}
1241

1242 1243
	if (unlikely(tk->tkr.clock->maxadj &&
		(tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
1244
		printk_deferred_once(KERN_WARNING
1245
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
1246 1247
			tk->tkr.clock->name, (long)tk->tkr.mult + adj,
			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
1248
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
1298
	tk->tkr.mult += adj;
1299
	tk->xtime_interval += interval;
1300
	tk->tkr.xtime_nsec -= offset;
1301
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
1302

1303
out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
1318 1319 1320
	if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr.xtime_nsec;
		tk->tkr.xtime_nsec = 0;
1321
		tk->ntp_error += neg << tk->ntp_error_shift;
1322 1323
	}

1324 1325
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field into the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
1334
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1335
{
1336
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
1337
	unsigned int clock_set = 0;
1338

1339
	while (tk->tkr.xtime_nsec >= nsecps) {
1340 1341
		int leap;

1342
		tk->tkr.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
1347
		if (unlikely(leap)) {
1348
			struct timespec64 ts;
1349 1350

			tk->xtime_sec += leap;
1351

1352 1353 1354
			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
1355
				timespec64_sub(tk->wall_to_monotonic, ts));
1356

1357 1358
			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

1359
			clock_set = TK_CLOCK_WAS_SET;
1360
		}
1361
	}
1362
	return clock_set;
1363 1364
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into a shifted
 * interval of nanoseconds. Allows for an O(log) accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
1374
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1375 1376
						u32 shift,
						unsigned int *clock_set)
1377
{
	cycle_t interval = tk->cycle_interval << shift;
1379
	u64 raw_nsecs;
1380

1381
	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
1383 1384 1385
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
1387
	tk->tkr.cycle_last += interval;
1388

1389
	tk->tkr.xtime_nsec += tk->xtime_interval << shift;
1390
	*clock_set |= accumulate_nsecs_to_secs(tk);
1391

1392
	/* Accumulate raw time */
1393
	raw_nsecs = (u64)tk->raw_interval << shift;
1394
	raw_nsecs += tk->raw_time.tv_nsec;
1395 1396 1397
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1398
		tk->raw_time.tv_sec += raw_secs;
1399
	}
1400
	tk->raw_time.tv_nsec = raw_nsecs;
1401 1402

	/* Accumulate error between NTP and clock interval */
1403 1404 1405
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
1414
void update_wall_time(void)
1415
{
1416
	struct timekeeper *real_tk = &tk_core.timekeeper;
1417
	struct timekeeper *tk = &shadow_timekeeper;
1418
	cycle_t offset;
1419
	int shift = 0, maxshift;
1420
	unsigned int clock_set = 0;
	unsigned long flags;

1423
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1424 1425 1426

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;
1428

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1430
	offset = real_tk->cycle_interval;
#else
1432 1433
	offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
				   tk->tkr.cycle_last, tk->tkr.mask);
1434 1435
#endif

1436
	/* Check if there's really nothing to do */
1437
	if (offset < real_tk->cycle_interval)
1438 1439
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
1444
	 * that is smaller than the offset.  We then accumulate that
1445 1446
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
1447
	 */
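	/*
	 * Illustrative numbers (assumed, not from the source): if roughly
	 * 1000 tick intervals have gone by, the shift computed below comes
	 * out around 9 or 10, so the loop consumes chunks of about 512
	 * intervals, then 256, 128, ... until less than one interval is
	 * left.
	 */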
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
1449
	shift = max(0, shift);
1450
	/* Bound shift to one less than what overflows tick_length */
1451
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1452
	shift = min(shift, maxshift);
1453
	while (offset >= tk->cycle_interval) {
1454 1455
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
1456
		if (offset < tk->cycle_interval<<shift)
1457
			shift--;
1458 1459 1460
	}

	/* correct the clock when NTP error is too big */
1461
	timekeeping_adjust(tk, offset);
1462

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);
1468

	/*
	 * Finally, make sure that after the rounding
1471
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
1473
	clock_set |= accumulate_nsecs_to_secs(tk);

1475
	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
1483
	 * memcpy under the tk_core.seq against one before we start
1484 1485 1486
	 * updating.
	 */
	memcpy(real_tk, tk, sizeof(*tk));
1487
	timekeeping_update(real_tk, clock_set);
1488
	write_seqcount_end(&tk_core.seq);
1489
out:
1490
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1491
	if (clock_set)
1492 1493
		/* Have to call _delayed version, since in irq context*/
		clock_was_set_delayed();
1494
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
1500
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
1509
	struct timekeeper *tk = &tk_core.timekeeper;
1510 1511 1512
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec(t);
}
1514
EXPORT_SYMBOL_GPL(getboottime);

1516 1517
unsigned long get_seconds(void)
{
1518
	struct timekeeper *tk = &tk_core.timekeeper;
1519 1520

	return tk->xtime_sec;
1521 1522 1523
}
EXPORT_SYMBOL(get_seconds);

1524 1525
struct timespec __current_kernel_time(void)
{
1526
	struct timekeeper *tk = &tk_core.timekeeper;
1527

1528
	return timespec64_to_timespec(tk_xtime(tk));
1529
}
1530

1531 1532
struct timespec current_kernel_time(void)
{
1533
	struct timekeeper *tk = &tk_core.timekeeper;
1534
	struct timespec64 now;
1535 1536 1537
	unsigned long seq;

	do {
1538
		seq = read_seqcount_begin(&tk_core.seq);

1540
		now = tk_xtime(tk);
1541
	} while (read_seqcount_retry(&tk_core.seq, seq));
1542

1543
	return timespec64_to_timespec(now);
1544 1545
}
EXPORT_SYMBOL(current_kernel_time);
1546 1547 1548

struct timespec get_monotonic_coarse(void)
{
1549
	struct timekeeper *tk = &tk_core.timekeeper;
1550
	struct timespec64 now, mono;
1551 1552 1553
	unsigned long seq;

	do {
1554
		seq = read_seqcount_begin(&tk_core.seq);

1556 1557
		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
1558
	} while (read_seqcount_retry(&tk_core.seq, seq));
1559

1560
	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
1561
				now.tv_nsec + mono.tv_nsec);
1562 1563

	return timespec64_to_timespec(now);
1564
}
1565 1566

/*
1567
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}
1574 1575

/**
1576 1577 1578 1579 1580 1581
 * ktime_get_update_offsets_tick - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns monotonic time at last tick and various offsets
1582
 */
1583 1584
ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
1585
{
1586
	struct timekeeper *tk = &tk_core.timekeeper;
1587
	unsigned int seq;
1588 1589
	ktime_t base;
	u64 nsecs;
1590 1591

	do {
1592
		seq = read_seqcount_begin(&tk_core.seq);
1593

1594 1595
		base = tk->tkr.base_mono;
		nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
1596

1597 1598 1599
		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
1600
	} while (read_seqcount_retry(&tk_core.seq, seq));
1601

1602
	return ktime_add_ns(base, nsecs);
1603
}

1605 1606
#ifdef CONFIG_HIGH_RES_TIMERS
/**
1607
 * ktime_get_update_offsets_now - hrtimer helper
1608 1609
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
1610
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
1611 1612
 *
 * Returns current monotonic time and updates the offsets
1613
 * Called from hrtimer_interrupt() or retrigger_next_event()
1614
 */
1615
ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
1616
							ktime_t *offs_tai)
1617
{
1618
	struct timekeeper *tk = &tk_core.timekeeper;
1619
	unsigned int seq;
1620 1621
	ktime_t base;
	u64 nsecs;
1622 1623

	do {
1624
		seq = read_seqcount_begin(&tk_core.seq);
1625

1626
		base = tk->tkr.base_mono;
1627
		nsecs = timekeeping_get_ns(tk);
1628

1629 1630
		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
1631
		*offs_tai = tk->offs_tai;
1632
	} while (read_seqcount_retry(&tk_core.seq, seq));
1633

1634
	return ktime_add_ns(base, nsecs);
1635 1636 1637
}
#endif

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
1643
	struct timekeeper *tk = &tk_core.timekeeper;
1644
	unsigned long flags;
1645
	struct timespec64 ts;
1646
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

1665
	getnstimeofday64(&ts);
1666

1667
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1668
	write_seqcount_begin(&tk_core.seq);
1669

1670
	orig_tai = tai = tk->tai_offset;
1671
	ret = __do_adjtimex(txc, &ts, &tai);
1672

1673 1674
	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
1675
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1676
	}
1677
	write_seqcount_end(&tk_core.seq);
1678 1679
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

1680 1681 1682
	if (tai != orig_tai)
		clock_was_set();

1683 1684
	ntp_notify_cmos_timer();

1685 1686
	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
1694 1695 1696
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1697
	write_seqcount_begin(&tk_core.seq);
1698

1699
	__hardpps(phase_ts, raw_ts);
1700

1701
	write_seqcount_end(&tk_core.seq);
1702
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
1715
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
1717
	write_sequnlock(&jiffies_lock);
1718
	update_wall_time();
}