/*
 * You SHOULD NOT be including this unless you're vsyscall
 * handling code or timekeeping internal code!
 */

#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
#define _LINUX_TIMEKEEPER_INTERNAL_H

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>

/**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock:	Current clocksource used for timekeeping.
 * @read:	Read function of @clock
 * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult:	(NTP adjusted) multiplier for scaled math conversion
 * @shift:	Shift value for scaled math conversion
 * @xtime_nsec: Shifted (fractional) nanoseconds offset for readout
 * @base:	ktime_t (nanoseconds) base time for readout
 *
 * This struct has a size of 56 bytes on 64 bit. Together with a seqcount it
 * occupies a single 64-byte cache line.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for the fast NMI safe accessors.
 */
struct tk_read_base {
	struct clocksource	*clock;
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;
	cycle_t			cycle_last;
	u32			mult;
	u32			shift;
	u64			xtime_nsec;
	ktime_t			base;
};
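
/*
 * Illustration only (hypothetical helper, not part of the kernel header):
 * a minimal sketch of the scaled-math readout that @mask, @cycle_last,
 * @mult, @shift and @xtime_nsec enable. The real readers live in
 * kernel/time/timekeeping.c and run under a seqcount.
 */
static inline u64 tk_read_base_example_ns(struct tk_read_base *tkr)
{
	cycle_t now, delta;
	u64 nsec;

	/* Read the clocksource; the mask handles wrapping of non 64bit counters */
	now = tkr->read(tkr->clock);
	delta = (now - tkr->cycle_last) & tkr->mask;

	/* Scaled math: cycles * mult yields shifted nanoseconds */
	nsec = tkr->xtime_nsec + delta * tkr->mult;

	/*
	 * Shift down to plain nanoseconds; a real reader adds this to
	 * @base (or @xtime_sec) to form a timestamp.
	 */
	return nsec >> tkr->shift;
}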

/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
 * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
 * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:		Offset clock monotonic -> clock realtime
 * @offs_boot:		Offset clock monotonic -> clock boottime
 * @offs_tai:		Offset clock monotonic -> clock tai
 * @tai_offset:		The current UTC to TAI offset in seconds
 * @clock_was_set_seq:	The sequence number of clock was set events
 * @raw_time:		Monotonic raw base time in timespec64 format
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nanoseconds in one NTP
 *			interval.
 * @xtime_remainder:	Shifted nanoseconds left over when rounding
 *			@cycle_interval
 * @raw_interval:	Raw nanoseconds accumulated per NTP interval.
 * @ntp_error:		Difference between accumulated time and NTP time in ntp
 *			shifted nanoseconds.
 * @ntp_error_shift:	Shift conversion between clock shifted nanoseconds and
 *			ntp shifted nanoseconds.
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time.  Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative; however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is adjusted after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time; getboottime() must be
 * used instead.
 */
struct timekeeper {
	struct tk_read_base	tkr_mono;
	struct tk_read_base	tkr_raw;
	u64			xtime_sec;
	unsigned long		ktime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	s32			tai_offset;
	unsigned int		clock_was_set_seq;
	struct timespec64	raw_time;

	/* The following members are for timekeeping internal use */
	cycle_t			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u32			raw_interval;
	/* The ntp_tick_length() value currently being used.
	 * This cached copy ensures we consistently apply the tick
	 * length for an entire tick, as ntp_tick_length may change
	 * mid-tick, and we don't want to apply that new value to
	 * the tick in progress.
	 */
	u64			ntp_tick;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nanoseconds. */
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
};
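
/*
 * Illustration only (hypothetical helper, not part of the kernel header):
 * a simplified sketch of how one NTP interval is accumulated into the
 * timekeeper. The real logic lives in kernel/time/timekeeping.c and also
 * updates raw_time, tracks ntp_error and handles leap seconds.
 */
static inline void tk_example_accumulate_interval(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;

	/* Advance the clocksource position by one interval worth of cycles */
	tk->tkr_mono.cycle_last += tk->cycle_interval;

	/* Accumulate the shifted nanoseconds of this interval */
	tk->tkr_mono.xtime_nsec += tk->xtime_interval;

	/* Carry whole seconds out of the shifted nsec field */
	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;
	}
}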

#ifdef CONFIG_GENERIC_TIME_VSYSCALL

extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult,
				cycle_t cycle_last);
extern void update_vsyscall_tz(void);

#else

static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */