/*
 * You SHOULD NOT be including this unless you're vsyscall
 * handling code or timekeeping internal code!
 */

#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
#define _LINUX_TIMEKEEPER_INTERNAL_H

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>

/*
 * Structure holding internal timekeeping values.
 *
 * Note: wall_to_monotonic is what we need to add to xtime (or xtime
 * corrected for sub-jiffie times) to get to monotonic time.
 * Monotonic is pegged at zero at system boot time, so
 * wall_to_monotonic will be negative; however, we will ALWAYS keep
 * the tv_nsec part positive so we can use the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
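/*
 * A rough sketch of the relations described above (illustrative only,
 * not definitions taken from this header):
 *
 *	CLOCK_MONOTONIC  = xtime + wall_to_monotonic
 *	boot based time  = CLOCK_MONOTONIC + total_sleep_time
 *	                 = xtime + wall_to_monotonic + total_sleep_time
 *
 * wall_to_monotonic is adjusted on resume so that CLOCK_MONOTONIC does
 * not jump, while the growing total_sleep_time keeps the boot based
 * offset accounting for the time spent in suspend.
 */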
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource	*clock;
	/* NTP adjusted clock multiplier */
	u32			mult;
	/* The shift value of the current clocksource. */
	u32			shift;
	/* Clock shifted nano seconds */
	u64			xtime_nsec;

	/* Monotonic base time */
	ktime_t			base_mono;

	/* Current CLOCK_REALTIME time in seconds */
	u64			xtime_sec;
	/* CLOCK_REALTIME to CLOCK_MONOTONIC offset */
	struct timespec64	wall_to_monotonic;

	/* Offset clock monotonic -> clock realtime */
	ktime_t			offs_real;
	/* Offset clock monotonic -> clock boottime */
	ktime_t			offs_boot;
	/* Offset clock monotonic -> clock tai */
	ktime_t			offs_tai;

	/* time spent in suspend */
	struct timespec64	total_sleep_time;
	/* The current UTC to TAI offset in seconds */
	s32			tai_offset;

	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
	struct timespec64	raw_time;

	/* Number of clock cycles in one NTP interval. */
	cycle_t			cycle_interval;
	/* Last cycle value (also stored in clock->cycle_last) */
	cycle_t			cycle_last;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64			xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64			xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32			raw_interval;

	/*
	 * Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds.
	 */
	s64			ntp_error;
	/*
	 * Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds.
	 */
	u32			ntp_error_shift;
};

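/*
 * Illustrative sketch (not part of this header's API): roughly how the
 * scaling fields above combine when time is read or accumulated. The
 * helper name below is made up for the example; the real logic lives in
 * kernel/time/timekeeping.c.
 */
#if 0	/* example only */
static inline u64 tk_example_shifted_nsec(struct timekeeper *tk, cycle_t now)
{
	/* cycles elapsed since the last accumulation point */
	cycle_t delta = (now - tk->cycle_last) & tk->clock->mask;

	/*
	 * Scale cycles to "clock shifted nano seconds"; the result stays
	 * left-shifted by tk->shift so it can be added to xtime_nsec.
	 * Plain nanoseconds would be
	 * (tk->xtime_nsec + delta * tk->mult) >> tk->shift.
	 */
	return (u64)delta * tk->mult;
}
#endif
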
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);

#else

static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */