#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_enabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

int tsc_disable;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
				"cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;

	return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a khz divisor instead of a mhz one to keep better precision,
 *  since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
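/*
 * Example of the scaling above: for a 2 GHz CPU, cpu_khz = 2000000, so
 * cyc2ns_scale = (10^6 << 10) / 2000000 = 512, and
 * ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns per cycle.
 */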
unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
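
/*
 * The consumer side, cycles_2_ns(), is not defined in this file (which is
 * why cyc2ns_scale above is not static); per the derivation above it
 * evaluates (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR, and is what
 * native_sched_clock() below calls.
 */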

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(!tsc_enabled && !tsc_unstable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
		delta64 = min(delta64, (end - start));
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits.
	 */
	if (count <= 1)
		goto err;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

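	/*
	 * delta64 is the smallest TSC delta measured over the
	 * ~CALIBRATE_TIME_MSEC millisecond CTC window; cycles per
	 * millisecond is the CPU frequency in kHz.
	 */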
	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

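	/*
	 * Rescale before a frequency increase and after a decrease (and on
	 * resume), so the scaled loops_per_jiffy/cpu_khz never correspond
	 * to a slower clock than the one actually running.
	 */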
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (cpu_khz) {

			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* set in tsc_init() via clocksource_khz2mult() */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		tsc_enabled = 0;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
		       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
	 .callback = dmi_mark_tsc_unstable,
	 .ident = "IBM Thinkpad 380XD",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		     },
	 },
	 {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

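	/* a TSC that keeps counting across suspend needs no watchdog verification */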
	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		goto out_no_tsc;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		goto out_no_tsc;

	printk("Detected %lu.%03lu MHz processor.\n",
				(unsigned long)cpu_khz / 1000,
				(unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	} else
		tsc_enabled = 1;

	clocksource_register(&clocksource_tsc);

	return;

out_no_tsc:
	/*
	 * Set the tsc_disable flag if there's no TSC support; this
	 * makes it a fast flag for the kernel to see whether it
	 * should be using the TSC.
	 */
	tsc_disable = 1;
}