/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

#ifndef CONFIG_64BIT
/*
 * The processor-internal cycle counter (Control Register 16) is used as the
 * time source for the sched_clock() function.  This register is 64 bits wide
 * on a 64-bit kernel and 32 bits wide on a 32-bit kernel.  Since sched_clock()
 * always requires a 64-bit counter, on the 32-bit kernel we emulate the upper
 * 32 bits with a per-cpu variable which we increment every time the counter
 * wraps around (which happens every ~4 seconds).
 */
static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
#endif
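
/*
 * Rough arithmetic behind the "~4 seconds" above (illustrative figure,
 * assuming a CPU clock around 1 GHz): a 32-bit counter incrementing once
 * per cycle wraps after 2^32 cycles, i.e. 4,294,967,296 / 1,000,000,000
 * ~= 4.3 seconds.
 */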

/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.  
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
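
/*
 * Worked example with illustrative numbers (not taken from any particular
 * machine): on a 1 GHz CPU with the usual rate of 1, PAGE0->mem_10msec is
 * 10,000,000 CR16 cycles per 10 ms, so with HZ == 100 time_init() sets
 * clocktick = (100 * 10,000,000) / 100 = 10,000,000 cycles between ticks.
 */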
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now, now2;
	unsigned long next_tick;
	unsigned long cycles_elapsed, ticks_elapsed = 1;
	unsigned long cycles_remainder;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Get current cycle counter (Control Register 16). */
	now = mfctl(16);

	cycles_elapsed = now - next_tick;

	if ((cycles_elapsed >> 6) < cpt) {
		/* use "cheap" math (add/subtract) instead
		 * of the more expensive div/mul method
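		 * (the "(cycles_elapsed >> 6) < cpt" check above guarantees
		 * fewer than 64 ticks have elapsed, so the subtraction loop
		 * below runs at most 63 times)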
		 */
		cycles_remainder = cycles_elapsed;
		while (cycles_remainder > cpt) {
			cycles_remainder -= cpt;
			ticks_elapsed++;
		}
	} else {
		/* TODO: Reduce this to one fdiv op */
		cycles_remainder = cycles_elapsed % cpt;
		ticks_elapsed += cycles_elapsed / cpt;
	}

	/* convert from "division remainder" to "remainder of clock tick" */
	cycles_remainder = cpt - cycles_remainder;

	/* Determine when (in CR16 cycles) next IT interrupt will fire.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 */
	next_tick = now + cycles_remainder;

	cpuinfo->it_value = next_tick;

	/* Program the IT with the time of the next interrupt.
	 * Only the bottom 32 bits of next_tick are writable in CR16!
	 */
	mtctl(next_tick, 16);

#if !defined(CONFIG_64BIT)
	/* check for overflow on a 32-bit kernel (every ~4 seconds). */
	if (unlikely(next_tick < now))
		this_cpu_inc(cr16_high_32_bits);
#endif

	/* Skip one clocktick on purpose if we missed next_tick.
	 * The new CR16 must be "later" than the current CR16, otherwise
	 * the itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor.  We'll account for the missed
	 * tick on the next timer interrupt.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * of whether one or the other wrapped.  If "now" is "bigger" we'll
	 * end up with a very large unsigned number.
	 */
	now2 = mfctl(16);
	if (next_tick - now2 > cpt)
		mtctl(next_tick+cpt, 16);

#if 1
/*
 * GGG: DEBUG code to report how many cycles programming CR16 took.
 */
	if (unlikely(now2 - now > 0x3000)) 	/* 12K cycles */
		printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
			" cyc %lX rem %lX "
			" next/now %lX/%lX\n",
			cpu, now2 - now, cycles_elapsed, cycles_remainder,
			next_tick, now );
#endif

	/* Can we differentiate between "early CR16" (aka Scenario 1) and
	 * "long delay" (aka Scenario 3)? I don't think so.
	 *
	 * Timer_interrupt will be delivered at least a few hundred cycles
	 * after the IT fires. But it's arbitrary how much time passes
	 * before we call it "late". I've picked one second.
	 *
	 * It's important that NO printk()s occur between reading CR16 and
	 * setting up the next value; they may introduce huge variance.
	 */
	if (unlikely(ticks_elapsed > HZ)) {
		/* Scenario 3: very long delay?  bad in any case */
		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
			" cycles %lX rem %lX "
			" next/now %lX/%lX\n",
			cpu,
			cycles_elapsed, cycles_remainder,
			next_tick, now );
	}

	/* Done mucking with unreliable delivery of interrupts.
	 * Go do system housekeeping.
	 */

	if (!--cpuinfo->prof_counter) {
		cpuinfo->prof_counter = cpuinfo->prof_multiplier;
		update_process_times(user_mode(get_irq_regs()));
	}

	if (cpu == 0)
		xtime_update(ticks_elapsed);

	return IRQ_HANDLED;
}


unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);


/* clock source code */

static cycle_t read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};

int update_cr16_clocksource(void)
{
	/* since the cr16 cycle counters are not synchronized across CPUs,
	   we'll check if we should switch to a safe clocksource: */
	if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
		clocksource_change_rating(&clocksource_cr16, 0);
		return 1;
	}

	return 0;
}

void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
	/* With multiple 64-bit CPUs online, the cr16s are not synchronized. */
	if (cpu != 0)
		clear_sched_clock_stable();
#endif

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);

void read_persistent_clock(struct timespec *ts)
{
	static struct pdc_tod tod_data;
	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}


/*
 * sched_clock() framework
 */

static u32 cyc2ns_mul __read_mostly;
static u32 cyc2ns_shift __read_mostly;

u64 sched_clock(void)
{
	u64 now;

	/* Get current cycle counter (Control Register 16). */
#ifdef CONFIG_64BIT
	now = mfctl(16);
#else
	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
#endif

	/* return the value in ns (cycles_2_ns) */
	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
}
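
/*
 * Conversion sketch (illustrative numbers, not from a real machine):
 * clocks_calc_mult_shift() in time_init() below picks cyc2ns_mul and
 * cyc2ns_shift so that ns ~= (cycles * cyc2ns_mul) >> cyc2ns_shift.
 * For a 250,000 kHz (250 MHz) CR16 each cycle is 4 ns, so the pair acts
 * as a fixed-point multiply by 4.
 */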


/*
 * timer interrupt and sched_clock() initialization
 */

void __init time_init(void)
{
	unsigned long current_cr16_khz;

	current_cr16_khz = PAGE0->mem_10msec/10;  /* kHz */
	clocktick = (100 * PAGE0->mem_10msec) / HZ;

	/* calculate mult/shift values for cr16 */
	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
				NSEC_PER_MSEC, 0);

#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
	/* At bootup only one 64-bit CPU is online and cr16 is "stable" */
	set_sched_clock_stable();
#endif

	start_cpu_itimer();	/* get CPU 0 started */

	/* register at clocksource framework */
	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}