Commit 08047c4f authored by Thomas Gleixner

x86: Move calibrate_cpu to tsc.c

Move the code to where its only user is. Also we need to look at whether
this hardwired hackery might interfere with perfcounters.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 454ede7e
@@ -57,6 +57,4 @@ extern void time_init(void);
 #endif /* CONFIG_PARAVIRT */
 
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
......
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
......
@@ -17,6 +17,7 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+	int tsc_start, tsc_now;
+	int i, no_ctr_free;
+	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++)
+		if (avail_to_resrv_perfctr_nmi_bit(i))
+			break;
+	no_ctr_free = (i == 4);
+	if (no_ctr_free) {
+		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+		     "cpu_khz value may be incorrect.\n");
+		i = 3;
+		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		rdmsrl(MSR_K7_PERFCTR3, pmc3);
+	} else {
+		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+	local_irq_save(flags);
+	/* start measuring cycles, incrementing from 0 */
+	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+	rdtscl(tsc_start);
+	do {
+		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+		tsc_now = get_cycles();
+	} while ((tsc_now - tsc_start) < TICK_COUNT);
+	local_irq_restore(flags);
+	if (no_ctr_free) {
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		wrmsrl(MSR_K7_PERFCTR3, pmc3);
+		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+	} else {
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+
+	return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
 		return;
 	}
 
-#ifdef CONFIG_X86_64
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		cpu_khz = calibrate_cpu();
-#endif
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
......
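
Editorial note on the code being moved: calibrate_cpu() programs an AMD performance counter to count unhalted core cycles, busy-waits until the TSC has advanced by TICK_COUNT ticks, and then scales tsc_khz by the ratio of counted core cycles to elapsed TSC ticks. The magic value 1 << 22 | 3 << 16 | 0x76 is the AMD K7/K8 event-select encoding: bit 22 enables the counter, bits 17:16 count in both OS and user mode, and 0x76 is the "CPU clocks not halted" event. Below is a minimal user-space sketch of that arithmetic with made-up numbers; the macro names and values are hypothetical illustrations, not kernel symbols, and this is not part of the commit itself.

#include <stdio.h>

/* Hypothetical names mirroring the event-select fields used above. */
#define EVNTSEL_ENABLE   (1UL << 22)  /* EN: start counting             */
#define EVNTSEL_OS_USR   (3UL << 16)  /* count in kernel and user mode  */
#define EVNT_CPU_CLOCKS  0x76UL       /* CPU clocks not halted          */

int main(void)
{
	unsigned long evntsel = EVNTSEL_ENABLE | EVNTSEL_OS_USR | EVNT_CPU_CLOCKS;

	/* Made-up sample: the counter saw pmc_now core cycles while the
	 * TSC advanced by tsc_delta ticks at a known tsc_khz rate. */
	unsigned long tsc_khz   = 2200000;    /* assumed 2.2 GHz TSC     */
	unsigned long tsc_delta = 100000000;  /* TICK_COUNT TSC ticks    */
	unsigned long pmc_now   = 100000000;  /* core cycles counted     */

	/* Same ratio as the function's return statement: on a fixed-rate
	 * TSC, core and TSC frequencies agree, so cpu_khz == tsc_khz. */
	unsigned long cpu_khz = pmc_now * tsc_khz / tsc_delta;

	printf("evntsel = %#lx, cpu_khz = %lu\n", evntsel, cpu_khz);
	return 0;
}

Note that pmc_now * tsc_khz needs 64-bit arithmetic to avoid overflow, which the kernel function gets for free from unsigned long on x86_64.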