/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = (interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
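
/*
 * Worked example (illustrative numbers, not from any real clocksource):
 * with HZ=250, NTP_INTERVAL_LENGTH is 4,000,000 ns. Assume clock->mult=4
 * and clock->shift=0, i.e. 4 ns per cycle:
 *
 *	tmp = 4000000 << 0           -> 4000000  (shifted ns per NTP tick)
 *	ntpinterval                  =  4000000
 *	tmp = (4000000 + 4/2) / 4    -> 1000000  (cycle_interval, rounded)
 *	xtime_interval = 1000000 * 4 =  4000000  (shifted ns accumulated)
 *	xtime_remainder = 4000000 - 4000000 = 0
 *
 * A nonzero xtime_remainder is folded back into the NTP error so the
 * accumulated time still matches the requested NTP interval exactly.
 */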

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}
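
/*
 * Worked example (made-up numbers): with tkr->mult = 1 << 22 and
 * tkr->shift = 22, one cycle is exactly 1 ns:
 *
 *	delta = 1000 cycles, xtime_nsec = 0
 *	nsec  = (1000 * (1 << 22) + 0) >> 22 = 1000 ns
 *
 * Carrying xtime_nsec in shifted (fractional) nanoseconds is what lets
 * NTP nudge mult by tiny amounts without losing sub-ns precision.
 */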

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tkr->read(tkr->clock),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
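
/*
 * Example (illustrative; function names are made up): the fast accessors
 * are for contexts where the normal seqcount read side is unsafe, such
 * as NMI handlers or tracing hooks instrumenting timekeeping itself:
 *
 *	void notrace my_nmi_handler(void)
 *	{
 *		u64 ts = ktime_get_mono_fast_ns();
 *
 *		my_trace_record(ts);	// hypothetical consumer
 *	}
 *
 * The latch scheme means a reader can never deadlock against an update,
 * at the cost of the monotonicity caveats documented above.
 */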

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	if (remainder != 0) {
		tk->tkr_mono.xtime_nsec -= remainder;
		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
		tk->ntp_error += remainder << tk->ntp_error_shift;
		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
	}
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
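
/*
 * Example (illustrative): a hypervisor guest driver listening for
 * timekeeping changes. The callback names are made up for this sketch;
 * the notifier is invoked under timekeeper_lock with the timekeeper as
 * the data argument and the was_set flag as the action:
 *
 *	static int my_gtod_notify(struct notifier_block *nb,
 *				  unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;
 *
 *		// push tk->tkr_mono.base etc. to the hypervisor
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_gtod_nb = {
 *		.notifier_call = my_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&my_gtod_nb);
 */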

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/* Update the monotonic raw base */
	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	u64 cycle_now, delta;
	u64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
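
/*
 * Typical use (illustrative): ktime_get() is the usual way to take
 * CLOCK_MONOTONIC deltas in kernel code:
 *
 *	ktime_t start = ktime_get();
 *
 *	do_work();	// placeholder for the code being timed
 *	pr_debug("took %lld ns\n",
 *		 ktime_to_ns(ktime_sub(ktime_get(), start)));
 */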

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
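
/*
 * Example (illustrative): convert a monotonic timestamp to wall clock
 * time for correlation with user-visible logs:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);
 *
 * Only the current offset is added, so the result is only as accurate
 * as the offset at the moment of conversion.
 */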

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
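
/*
 * Example (illustrative): take a snapshot before a measurement window so
 * a later cross-timestamp can be interpolated against it:
 *
 *	struct system_time_snapshot snap;
 *
 *	ktime_get_snapshot(&snap);
 *	// ... later:
 *	// get_device_system_crosststamp(my_get_time, dev, &snap, &xt);
 *
 * my_get_time/dev/xt are placeholders for a real driver's callback,
 * context and result structure.
 */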

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set on history period
 * @ts:				Cross timestamp that should be adjusted using
 *	partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 u64 partial_history_cycles,
					 u64 total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles/2 ?
		true : false;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale monotonic raw
	 *	correction by:
	 *	mult(real)/mult(raw) yielding the realtime correction
	 * Otherwise, calculate the realtime correction similar to monotonic
	 *	raw calculation
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(u64 before, u64 test, u64 after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}
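
/*
 * Illustration (made-up values): with a counter that wrapped between
 * 'before' and 'after' (e.g. before=0xfff0, after=0x0010), a post-wrap
 * value like test=0x0005 fails the first check but satisfies the second
 * (test < before && before > after), so it is still treated as
 * "between".
 */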

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *	system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *	time when counter provided by the driver is before the current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	u64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		u64 partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
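
/*
 * Sketch of a driver-side callback (names are hypothetical): the driver
 * must latch its device clock and the system counter as close together
 * as the hardware allows, then report both:
 *
 *	static int my_dev_get_time(ktime_t *device_time,
 *				   struct system_counterval_t *system_counter,
 *				   void *ctx)
 *	{
 *		struct my_dev *dev = ctx;
 *
 *		*device_time = my_dev_read_hw_clock(dev);
 *		*system_counter = my_dev_read_latched_counter(dev);
 *		return 0;
 *	}
 *
 *	err = get_device_system_crosststamp(my_dev_get_time, dev, NULL, &xt);
 */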

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);
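
/*
 * Example (illustrative): setting the wall clock from kernel code, e.g.
 * an RTC driver syncing system time at boot:
 *
 *	struct timespec64 ts = { .tv_sec = 1500000000, .tv_nsec = 0 };
 *
 *	if (do_settimeofday64(&ts))
 *		pr_warn("invalid or too-early time, rejected\n");
 *
 * Invalid values, or values that would make the monotonic clock appear
 * to run backwards, are rejected with -EINVAL.
 */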

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if (!timespec_inject_offset_valid(ts))
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk),  ts64);
	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);


/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}
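
/*
 * Example (illustrative): the NTP/adjtimex path applies the leap-second
 * table's TAI-UTC offset (37 s at the time of writing) with:
 *
 *	timekeeping_set_tai_offset(37);
 *
 * after which CLOCK_TAI reads CLOCK_REALTIME + 37 s.
 */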

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns(&tk->tkr_raw);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exists = true;

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If system has neither 1) nor 2), 3) will be used finally.
 *
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
 * means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * Whether 1) can be used is only known when doing
 * timekeeping_resume(), which is invoked after rtc_suspend(),
 * so we can't reliably skip rtc_suspend() if the system has 1).
 *
 * But if system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	u64 cycle_now;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk->tkr_mono.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 nsec, cyc_delta;

		cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
					      tk->tkr_mono.mask);
		nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

1702
int timekeeping_suspend(void)
1703
{
1704
	struct timekeeper *tk = &tk_core.timekeeper;
1705
	unsigned long flags;
1706 1707
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;
1708

1709
	read_persistent_clock64(&timekeeping_suspend_time);
1710

1711 1712 1713 1714 1715 1716
	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1717
		persistent_clock_exists = true;
1718

1719
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1720
	write_seqcount_begin(&tk_core.seq);
1721
	timekeeping_forward_now(tk);
1722
	timekeeping_suspended = 1;
1723

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * each of which can add ~1 second of drift error,
		 * try to compensate so the difference between system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * If delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise, adjust the recorded suspend time to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);
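
/*
 * Background note (general syscore behaviour, not specific to this
 * file): syscore ops run late in suspend and early in resume with
 * interrupts disabled and only the boot CPU online, so the handlers
 * above cannot race with the regular tick path.
 */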

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset  = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume mult_adj == 1 for now.
	 *
	 * When mult_adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, which causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
							s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	u32 base = tk->tkr_mono.clock->mult;
	u32 max = tk->tkr_mono.clock->maxadj;
	u32 cur_adj = tk->tkr_mono.mult;
	s64 tick_error;
	bool negative;
	u32 adj_scale;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if it's small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* Preserve the direction of correction */
	negative = (tick_error < 0);

	/* If any adjustment would pass the max, just return */
	if (negative && (cur_adj - 1) <= (base - max))
		return;
	if (!negative && (cur_adj + 1) >= (base + max))
		return;
	/*
	 * Sort out the magnitude of the correction, but
	 * avoid making so large a correction that we go
	 * over the max adjustment.
	 */
	adj_scale = 0;
	tick_error = abs(tick_error);
	while (tick_error > interval) {
		u32 adj = 1 << (adj_scale + 1);

		/* Check if adjustment gets us within 1 unit from the max */
		if (negative && (cur_adj - adj) <= (base - max))
			break;
		if (!negative && (cur_adj + adj) >= (base + max))
			break;

		adj_scale++;
		tick_error >>= 1;
	}

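	/*
	 * Example (illustrative numbers): with tick_error == 9 * interval
	 * the loop above halves tick_error while bumping adj_scale --
	 * 9, 4, 2, 1 -- and stops at adj_scale == 3, i.e. a multiplier
	 * correction of 1 << 3 == 8, the largest power-of-two step that
	 * overshoots neither the remaining error nor the maxadj bound.
	 */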
	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}
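	/*
	 * The single-unit nudge above uses ntp_err_mult as a latch: mult
	 * is bumped by exactly one while the accumulated ntp_error is
	 * positive, and the bump is undone once that error has been
	 * worked off.
	 */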

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow(). So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
		tk->tkr_mono.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}
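
/*
 * Note on the leap handling above: when a leap second is applied,
 * xtime_sec steps by the leap while wall_to_monotonic is moved by the
 * same amount in the opposite direction, so CLOCK_MONOTONIC stays
 * continuous across the insertion.
 */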

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
				    u32 shift, unsigned int *clock_set)
{
	u64 interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);
	return offset;
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	u64 offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}
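	/*
	 * Example (illustrative numbers, with cycle_interval a power of
	 * two): for offset == 70 intervals the first pass runs at
	 * shift == 6 and consumes 64 intervals in one go; the loop then
	 * steps the shift down, consuming 4 more at shift == 2 and 2 at
	 * shift == 1, instead of accumulating 70 single ticks.
	 */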

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context */
		clock_was_set_delayed();
}
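
/*
 * Design note: the accumulation above is performed on shadow_timekeeper
 * under timekeeper_lock only; the tk_core.seq write section is entered
 * just for timekeeping_update() and the final memcpy(), keeping the
 * window in which readers must retry as short as possible.
 */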

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

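/*
 * Note: unlike current_kernel_time64() below, this variant takes no
 * seqcount snapshot; it is intended for callers that can tolerate, or
 * otherwise exclude, a concurrent timekeeper update.
 */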
struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec64 current_kernel_time64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time64);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);

	return now;
}
EXPORT_SYMBOL(get_monotonic_coarse64);

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base >= tk->next_leap_ktime))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

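	/*
	 * ADJ_SETOFFSET steps the clock by the signed offset carried in
	 * txc->time; tv_usec is taken as nanoseconds when ADJ_NANO is set
	 * and scaled up from microseconds otherwise.
	 */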
	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}