Commit 65cd4f6c authored by Stephen Boyd, committed by John Stultz

arch_timer: Move to generic sched_clock framework

Register with the generic sched_clock framework now that it
supports 64 bits. This fixes two problems with the current
sched_clock support for machines using the architected timers.
First, we don't subtract the start value from subsequent
sched_clock calls, so we can potentially start off with
sched_clock returning gigantic numbers. Second, there is no
support for suspend/resume handling, so problems such as those
discussed in 6a4dae5e (ARM: 7565/1: sched: stop sched_clock()
during suspend, 2012-10-23) can happen without this patch.
Finally, it allows us to move the sched_clock setup into
drivers/clocksource, out of the arch ports.

Cc: Christopher Covington <cov@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Parent 07783397
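
The core of the change is visible in the drivers/clocksource hunk below: the driver hands its raw counter read-out to the generic framework through a single sched_clock_register() call and lets the framework handle nanosecond scaling, the start-value offset and suspend/resume. A minimal sketch of that registration pattern, with hypothetical names (my_timer_read_counter, MY_TIMER_RATE_HZ) standing in for the real arch-timer ones:

#include <linux/init.h>
#include <linux/sched_clock.h>
#include <linux/types.h>

/* Hypothetical rate; the real driver passes arch_timer_rate. */
#define MY_TIMER_RATE_HZ	19200000

/* Hypothetical read-out; the real driver passes arch_timer_read_counter. */
static u64 notrace my_timer_read_counter(void)
{
	/* Return the free-running hardware counter value here. */
	return 0;
}

static void __init my_timer_sched_clock_init(void)
{
	/*
	 * 56 valid counter bits at MY_TIMER_RATE_HZ; the framework converts
	 * ticks to nanoseconds, subtracts the value sampled at registration,
	 * and stops the clock across suspend/resume.
	 */
	sched_clock_register(my_timer_read_counter, 56, MY_TIMER_RATE_HZ);
}

Compared with the removed per-arch code, this scales through the framework's mult/shift pair rather than the cached whole-nanosecond multiplier (NSEC_PER_SEC / rate) that each port used to compute itself.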
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/sched_clock.h>
 
 #include <asm/delay.h>
@@ -22,13 +21,6 @@ static unsigned long arch_timer_read_counter_long(void)
 	return arch_timer_read_counter();
 }
 
-static u32 sched_clock_mult __read_mostly;
-
-static unsigned long long notrace arch_timer_sched_clock(void)
-{
-	return arch_timer_read_counter() * sched_clock_mult;
-}
-
 static struct delay_timer arch_delay_timer;
 
 static void __init arch_timer_delay_timer_register(void)
@@ -48,11 +40,5 @@ int __init arch_timer_arch_init(void)
 	arch_timer_delay_timer_register();
 
-	/* Cache the sched_clock multiplier to save a divide in the hot path. */
-	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
-	sched_clock_func = arch_timer_sched_clock;
-	pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
-		arch_timer_rate / 1000, sched_clock_mult);
-
 	return 0;
 }
@@ -14,6 +14,7 @@ config ARM64
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
......
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-static u64 sched_clock_mult __read_mostly;
-
-unsigned long long notrace sched_clock(void)
-{
-	return arch_timer_read_counter() * sched_clock_mult;
-}
-
 void __init time_init(void)
 {
 	u32 arch_timer_rate;
@@ -78,9 +71,6 @@ void __init time_init(void)
 	if (!arch_timer_rate)
 		panic("Unable to initialise architected timer.\n");
 
-	/* Cache the sched_clock multiplier to save a divide in the hot path. */
-	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
 	/* Calibrate the delay loop directly */
 	lpj_fine = arch_timer_rate / HZ;
 }
@@ -19,6 +19,7 @@
 #include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/sched_clock.h>
 
 #include <asm/arch_timer.h>
 #include <asm/virt.h>
@@ -471,6 +472,15 @@ static int __init arch_timer_register(void)
 		goto out;
 	}
 
+	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+	cyclecounter.mult = clocksource_counter.mult;
+	cyclecounter.shift = clocksource_counter.shift;
+	timecounter_init(&timecounter, &cyclecounter,
+			 arch_counter_get_cntvct());
+
+	/* 56 bits minimum, so we assume worst case rollover */
+	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+
 	if (arch_timer_use_virtual) {
 		ppi = arch_timer_ppi[VIRT_PPI];
 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
......