/*  linux/include/linux/clocksource.h
 *
 *  This file contains the structure definitions for clocksources.
 *
 *  If you are not a clocksource, or timekeeping code, you should
 *  not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:          Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:		Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
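
/*
 * Example (illustrative sketch only, not part of this header): wrapping a
 * free running hardware counter in a cyclecounter and layering a
 * timecounter on top of it.  my_hw_read(), MY_HW_BITS, MY_HW_MULT and
 * MY_HW_SHIFT are hypothetical placeholders for driver specifics;
 * CLOCKSOURCE_MASK() is defined further down in this header.
 *
 *	static cycle_t my_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)my_hw_read();
 *	}
 *
 *	static const struct cyclecounter my_cc = {
 *		.read	= my_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(MY_HW_BITS),
 *		.mult	= MY_HW_MULT,
 *		.shift	= MY_HW_SHIFT,
 *	};
 *	static struct timecounter my_tc;
 *
 *	At init time, with an arbitrary start time stamp of 0 ns:
 *		timecounter_init(&my_tc, &my_cc, 0);
 *
 *	Later, called more often than the hardware counter wraps:
 *		u64 now_ns = timecounter_read(&my_tc);
 */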

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
 * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
 * @shift:		cycle to nanosecond divisor (power of two)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @resume:		resume function for the clocksource, if necessary
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
159 160
 */
struct clocksource {
	/*
	 * First part of structure is read mostly
	 */
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(struct clocksource *cs);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	cycle_t mask;
	u32 mult;
	u32 mult_orig;
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);
	void (*resume)(void);
#ifdef CONFIG_IA64
	void *fsys_mmio;        /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      do { } while (0)
#endif

	/* timekeeping specific data, ignore */
	cycle_t cycle_interval;
	u64	xtime_interval;
	u32	raw_interval;
	/*
	 * Second part is written at each timer interrupt
	 * Keep it in a different cache line to dirty no
	 * more than one cache line.
	 */
	cycle_t cycle_last ____cacheline_aligned_in_smp;
	u64 xtime_nsec;
	s64 error;
	struct timespec raw_time;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};

extern struct clocksource *clock;	/* current clocksource */

/*
 * Clock source flag bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
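
/*
 * For example, CLOCKSOURCE_MASK(32) evaluates to 0xffffffff and
 * CLOCKSOURCE_MASK(64) to an all-ones cycle_t.
 */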

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
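
/*
 * Worked example (for illustration): a 1 MHz counter is 1000 kHz, so with a
 * shift of 20 this yields mult = (1000000 << 20) / 1000 = 1000 << 20.
 * Converting with (cycles * mult) >> shift then gives cycles * 1000, i.e.
 * 1000 ns per cycle, as expected for a 1 MHz clock.
 */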

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}
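
/*
 * Worked example (for illustration): for a 1 GHz counter,
 * clocksource_hz2mult(1000000000, shift) yields mult = 1 << shift, so
 * (cycles * mult) >> shift == cycles, i.e. one nanosecond per cycle.
 */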

/**
 * cyc2ns - converts clocksource cycles to nanoseconds
 * @cs:		Pointer to clocksource
 * @cycles:	Cycles
 *
 * Uses the clocksource and NTP adjustment to convert cycle_t values to nanoseconds.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cs->mult) >> cs->shift;
	return ret;
}
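
/*
 * Example (illustrative sketch): converting an elapsed cycle delta from a
 * clocksource into nanoseconds; "cs" is assumed to be a valid clocksource
 * and "last" a cycle value sampled earlier via cs->read(cs).
 *
 *	cycle_t now = cs->read(cs);
 *	s64 elapsed_ns = cyc2ns(cs, (now - last) & cs->mask);
 */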

/**
 * clocksource_calculate_interval - Calculates a clocksource interval struct
 *
 * @c:		Pointer to clocksource.
 * @length_nsec: Desired interval length in nanoseconds.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static inline void clocksource_calculate_interval(struct clocksource *c,
					  	  unsigned long length_nsec)
{
	u64 tmp;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = length_nsec;
	tmp <<= c->shift;
	tmp += c->mult_orig/2;
	do_div(tmp, c->mult_orig);

	c->cycle_interval = (cycle_t)tmp;
	if (c->cycle_interval == 0)
		c->cycle_interval = 1;

	/* Go back from cycles -> shifted ns, this time use the NTP adjusted mult */
	c->xtime_interval = (u64)c->cycle_interval * c->mult;
	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}
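
/*
 * Worked example (for illustration, assuming mult_orig == 1 << shift, i.e.
 * a 1 GHz counter): for length_nsec == 10000000 (a 10 ms interval at
 * HZ=100), cycle_interval becomes 10000000 cycles, xtime_interval becomes
 * 10000000 * mult (nanoseconds scaled by 2^shift) and raw_interval becomes
 * 10000000 ns.
 */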


/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
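
/*
 * Example (illustrative sketch only, not part of the kernel API): a minimal
 * clocksource definition and registration as a driver might write it.
 * my_cs_read(), my_hw_read() and my_hw_freq_hz are hypothetical names; the
 * rating, mask width and shift are arbitrary illustration values.
 *
 *	static cycle_t my_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)my_hw_read();
 *	}
 *
 *	static struct clocksource my_clocksource = {
 *		.name	= "my_timer",
 *		.rating	= 200,
 *		.read	= my_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	At init time, the driver would fill in mult and register:
 *		my_clocksource.mult = clocksource_hz2mult(my_hw_freq_hz,
 *							  my_clocksource.shift);
 *		clocksource_register(&my_clocksource);
 */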

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */