/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{

	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
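
/*
 * Worked example (editor's illustration, not part of the original file):
 * for a hypothetical clocksource with mult = 100 << 20 and shift = 20
 * (i.e. 100ns per cycle) and NTP_INTERVAL_LENGTH = 10000000ns (HZ=100),
 * the ns -> cycle conversion above yields:
 *
 *	tmp = 10000000 << 20;		(shifted ns per NTP interval)
 *	do_div(tmp, 100 << 20);		-> cycle_interval = 100000 cycles
 *	xtime_interval = 100000 * mult;	(back to shifted ns per interval)
 *
 * Here xtime_remainder is zero; with a less convenient mult it holds the
 * rounding error so accumulation stays consistent with ntp_tick_length().
 */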

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t delta;
	s64 nsec;

	delta = timekeeping_get_delta(tkr);

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in arch_gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
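
/*
 * Example (editor's sketch, not part of the original file): the readout
 * above in isolation. xtime_nsec is kept in "shifted" (mult-scaled)
 * nanoseconds, so sub-ns remainders are carried instead of being lost:
 */
static inline s64 __maybe_unused example_readout_ns(cycle_t delta, u32 mult,
						    u32 shift, u64 xtime_nsec)
{
	/* cycles * ns-per-cycle in fixed point, plus carried remainder */
	return (s64)((delta * mult + xtime_nsec) >> shift);
}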

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
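
/*
 * Example (editor's sketch, not part of the original file): the fast
 * accessors above take no locks, so they can be called from NMI context,
 * e.g. by a hypothetical instrumentation hook:
 */
static u64 example_nmi_stamp;	/* hypothetical storage */

static void __maybe_unused example_nmi_handler(void)
{
	/* safe even if the NMI interrupted a timekeeping update */
	example_nmi_stamp = ktime_get_mono_fast_ns();
}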

/* Suspend-time cycles value for halted fast timekeeper. */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	tk->tkr_mono.xtime_nsec -= remainder;
	tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
	tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
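
/*
 * Example (editor's sketch, not part of the original file): a minimal
 * pvclock_gtod listener. The chain above passes the TK_CLOCK_WAS_SET
 * state in @was_set and the timekeeper in @priv; all names here are
 * hypothetical.
 */
static int example_gtod_notify(struct notifier_block *nb,
			       unsigned long was_set, void *priv)
{
	/* a hypervisor could refresh its guest-visible clock data here */
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused example_gtod_nb = {
	.notifier_call = example_gtod_notify,
};

/* registered via: pvclock_gtod_register_notifier(&example_gtod_nb); */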

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/* Update the monotonic raw base */
	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in arch_gettimeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
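
/*
 * Example (editor's sketch, not part of the original file): timing a
 * code section with the monotonic accessor above.
 */
static void __maybe_unused example_measure(void)
{
	ktime_t start = ktime_get();

	/* ... work to be measured ... */

	pr_info("took %lld ns\n", ktime_to_ns(ktime_sub(ktime_get(), start)));
}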

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
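
/*
 * Example (editor's sketch, not part of the original file): the familiar
 * clock-id accessors can be expressed as thin wrappers around the
 * function above; e.g. a CLOCK_REALTIME read is the monotonic base plus
 * offs_real:
 */
static inline ktime_t __maybe_unused example_get_real(void)
{
	return ktime_get_with_offset(TK_OFFS_REAL);
}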

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts_raw = timespec64_to_timespec(tk->raw_time);
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw  = timekeeping_get_ns(&tk->tkr_raw);
		nsecs_real = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday64()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday64);
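
/*
 * Example (editor's sketch, not part of the original file): stepping the
 * wall clock to an absolute time, e.g. from an RTC driver at boot.
 */
static int __maybe_unused example_set_wall_clock(time64_t sec)
{
	struct timespec64 ts = { .tv_sec = sec, .tv_nsec = 0 };

	return do_settimeofday64(&ts);	/* -EINVAL if ts is not valid */
}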

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk),  ts64);
	if (!timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
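
/*
 * Example (editor's sketch, not part of the original file): applying a
 * relative one-second step, as e.g. adjtimex(ADJ_SETOFFSET) would.
 */
static int __maybe_unused example_step_forward(void)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	return timekeeping_inject_offset(&ts);
}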

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the clocksource is in a module, get a module reference.
	 * Succeeds for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}
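
/*
 * Example (editor's sketch, not part of the original file): the minimal
 * clocksource shape consumed by tk_setup_internals(). Registering such a
 * clocksource (e.g. with clocksource_register_hz() from clocksource.c)
 * is what eventually triggers timekeeping_notify().
 */
static cycle_t example_cs_read(struct clocksource *cs)
{
	return 0;	/* a real driver reads a free-running counter here */
}

static struct clocksource __maybe_unused example_cs = {
	.name	= "example",
	.rating	= 100,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};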

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns(&tk->tkr_raw);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

1196 1197 1198 1199 1200 1201
/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

1202 1203 1204 1205 1206
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
1207
	struct timekeeper *tk = &tk_core.timekeeper;
1208
	struct clocksource *clock;
1209
	unsigned long flags;
1210
	struct timespec64 now, boot, tmp;
1211

1212
	read_persistent_clock64(&now);
1213
	if (!timespec64_valid_strict(&now)) {
1214 1215 1216 1217
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
1218
	} else if (now.tv_sec || now.tv_nsec)
1219
		persistent_clock_exists = true;
1220

1221
	read_boot_clock64(&boot);
1222
	if (!timespec64_valid_strict(&boot)) {
1223 1224 1225 1226 1227
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}
1228

1229
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
1230
	write_seqcount_begin(&tk_core.seq);
1231 1232
	ntp_init();

1233
	clock = clocksource_default_clock();
1234 1235
	if (clock->enable)
		clock->enable(clock);
1236
	tk_setup_internals(tk, clock);
1237

1238 1239 1240
	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
1241
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1242
		boot = tk_xtime(tk);
1243

1244
	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1245
	tk_set_wall_to_mono(tk, tmp);
1246

1247
	timekeeping_update(tk, TK_MIRROR);
1248

1249
	write_seqcount_end(&tk_core.seq);
1250
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1251 1252
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec64 delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If the system has neither 1) nor 2), 3) is used as a fallback.
 *
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume()
 * means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * Whether 1) can be used is determined only in timekeeping_resume(),
 * which is invoked after rtc_suspend(), so we can't be sure it is safe
 * to skip rtc_suspend() if the system has 1).
 *
 * But if system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk->tkr_mono.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
						tk->tkr_mono.mask);

		/*
		 * "cycle_delta * mult" may overflow 64 bits, if the
		 * suspended time is too long. In that case we need to do
		 * the 64 bit math carefully
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}
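
/*
 * Example (editor's sketch, not part of the original file): the
 * overflow-safe cycles->ns conversion used in timekeeping_resume()
 * above, extracted as a stand-alone helper.
 */
static u64 __maybe_unused example_safe_cyc2ns(u64 cycle_delta, u32 mult,
					      u32 shift)
{
	u64 num, nsec = 0, max = ULLONG_MAX;

	do_div(max, mult);	/* largest delta whose product fits 64 bits */
	if (cycle_delta > max) {
		num = div64_u64(cycle_delta, max);
		nsec = (((u64) max * mult) >> shift) * num;
		cycle_delta -= num * max;
	}
	return nsec + (((u64) cycle_delta * mult) >> shift);
}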

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset  = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume mult_adj == 1 for now.
	 *
	 * When mult_adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one; this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
							s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	s64 tick_error;
	bool negative;
	u32 adj;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if it's small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* Sort out the magnitude of the correction */
	tick_error = abs(tick_error);
	for (adj = 0; tick_error > interval; adj++)
		tick_error >>= 1;

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
		tk->tkr_mono.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for an O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift,
						unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
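
/*
 * Worked example (illustrative numbers): with a backlog of 1000 pending
 * tick intervals, update_wall_time() below picks an initial shift of
 * ilog2(1000) ~= 9.  The first call here then consumes 512 intervals at
 * once, the next consumes 256 with shift = 8, and so on, so the backlog
 * drains in roughly a dozen calls instead of 1000 single-interval
 * iterations.
 */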

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval << shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call the _delayed version, since we are in irq context */
		clock_was_set_delayed();
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);
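
/*
 * Sketch of the arithmetic above: offs_real is (realtime - monotonic)
 * and offs_boot is the accumulated suspend time, which the monotonic
 * clock does not count.  Since boottime = monotonic + offs_boot, the
 * wall-clock moment of boot is realtime - boottime, which reduces to
 * offs_real - offs_boot.
 */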

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return timespec64_to_timespec(now);
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);

	return now;
}
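
/*
 * Note on the coarse accessors above: they deliberately avoid reading
 * the clocksource and instead return the timestamp captured at the
 * last update_wall_time() call, trading tick-level resolution for a
 * much cheaper, seqcount-only readout.
 */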

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}
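
/*
 * Illustrative note on the leapsecond check above: next_leap_ktime
 * holds the (monotonic) time of the next leap edge, or KTIME_MAX when
 * none is pending.  Once the returned base passes it, the realtime
 * offset handed to the hrtimer code is reduced by one second, so
 * CLOCK_REALTIME timers see the inserted second before
 * second_overflow() has formally applied it.
 */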

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;

		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
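
/*
 * A minimal userspace sketch (illustrative, not part of this file) of
 * the ADJ_SETOFFSET path handled above; with ADJ_NANO set, time.tv_usec
 * carries nanoseconds rather than microseconds:
 *
 *	struct timex txc = {};
 *	txc.modes = ADJ_SETOFFSET | ADJ_NANO;
 *	txc.time.tv_sec = 0;
 *	txc.time.tv_usec = 500000000;
 *	adjtimex(&txc);			(steps the clock forward by 0.5s)
 */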

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}