Commit 8b5f79f9 authored by Vitja Makarov, committed by Bryan Wu

[Blackfin] arch: initial generic time and clock sources

This patch enables High-Res Timers and the tickless kernel.
Signed-off-by: Vitja Makarov <vitja.makarov@gmail.com>
Signed-off-by: Michael Hennerich <michael.hennerich@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Parent 3dc50637
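For reference, a minimal configuration fragment exercising this patch might look as follows (a sketch only; NO_HZ and HIGH_RES_TIMERS come from kernel/time/Kconfig, which the Blackfin Kconfig now sources, and CYCLES_CLOCKSOURCE is optional):

CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_CYCLES_CLOCKSOURCE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
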
@@ -47,10 +47,6 @@ config GENERIC_IRQ_PROBE
bool
default y
config GENERIC_TIME
bool
default n
config GENERIC_GPIO
bool
default y
@@ -415,6 +411,30 @@ comment "Kernel Timer/Scheduler"
source kernel/Kconfig.hz
config GENERIC_TIME
bool "Generic time"
default y
config GENERIC_CLOCKEVENTS
bool "Generic clock events"
depends on GENERIC_TIME
default y
config CYCLES_CLOCKSOURCE
bool "Use 'CYCLES' as a clocksource (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on GENERIC_CLOCKEVENTS
depends on !BFIN_SCRATCH_REG_CYCLES
default n
help
If you say Y here, you will enable support for using the 'cycles'
registers as a clock source. Doing so means you will be unable to
safely write to the 'cycles' register during runtime. You will
still be able to read it (such as for performance monitoring), but
writing the registers will most likely crash the kernel.
source kernel/time/Kconfig
comment "Memory Setup"
config MEM_SIZE
......
@@ -6,9 +6,15 @@ extra-y := init_task.o vmlinux.lds
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o \
sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
fixed_code.o reboot.o bfin_gpio.o
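# With GENERIC_CLOCKEVENTS the clockevent/clocksource based tick (time-ts.o)
# replaces the legacy periodic tick code (time.o).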
ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
obj-y += time-ts.o
else
obj-y += time.o
endif
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o
......
@@ -32,6 +32,8 @@
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>
@@ -69,33 +71,44 @@ EXPORT_SYMBOL(pm_power_off);
* The idle loop on BFIN
*/
#ifdef CONFIG_IDLE_L1
void default_idle(void)__attribute__((l1_text));
static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif
void default_idle(void)
/*
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
*/
static void default_idle(void)
{
while (!need_resched()) {
local_irq_disable();
if (likely(!need_resched()))
idle_with_irq_disabled();
local_irq_enable();
}
}
local_irq_disable();
if (!need_resched())
idle_with_irq_disabled();
void (*idle)(void) = default_idle;
local_irq_enable();
}
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
* The idle thread. We try to conserve power, while trying to keep
* overall latency low. The architecture specific idle is passed
* a value to indicate the level of "idleness" of the system.
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
idle();
void (*idle)(void) = pm_idle;
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
cpu_die();
#endif
if (!idle)
idle = default_idle;
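/*
 * With NO_HZ the periodic tick is stopped while we sit idle and
 * restarted just before we go back to the scheduler.
 */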
tick_nohz_stop_sched_tick();
while (!need_resched())
idle();
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
......
/*
* linux/arch/blackfin/kernel/time-ts.c
*
* Based on arm clockevents implementation and old bfin time tick.
*
* Copyright(C) 2008, GeoTechnologies, Vitja Makarov
*
* This code is licensed under the GPL version 2. For details see
* kernel-base/COPYING.
*/
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <asm/blackfin.h>
#ifdef CONFIG_CYCLES_CLOCKSOURCE
static unsigned long cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}
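/*
 * cyc2ns_scale holds (10^6 / cpu_khz), i.e. nanoseconds per cycle, in
 * 2^10 fixed point, so the conversion below is a multiply and a shift
 * rather than a 64-bit division.
 */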
static inline unsigned long long cycles_2_ns(cycle_t cyc)
{
return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
static cycle_t read_cycles(void)
{
unsigned long tmp, tmp2;
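/* reading CYCLES latches the current CYCLES2 value into its shadow
 * register, so the two reads below form one consistent 64-bit stamp */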
asm("%0 = cycles; %1 = cycles2;" : "=d"(tmp), "=d"(tmp2));
return tmp | ((cycle_t)tmp2 << 32);
}
unsigned long long sched_clock(void)
{
return cycles_2_ns(read_cycles());
}
static struct clocksource clocksource_bfin = {
.name = "bfin_cycles",
.rating = 350,
.read = read_cycles,
.mask = CLOCKSOURCE_MASK(64),
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init bfin_clocksource_init(void)
{
set_cyc2ns_scale(get_cclk() / 1000);
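/* mult/shift let the generic timekeeping code convert cycle deltas
 * to nanoseconds: ns = (cycles * mult) >> shift */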
clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift);
if (clocksource_register(&clocksource_bfin))
panic("failed to register clocksource");
return 0;
}
#else
# define bfin_clocksource_init()
#endif
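/* program the core timer for the next event: TCOUNT counts down once
 * per core clock (TSCALE is 0) and raises IRQ_CORETMR at zero */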
static int bfin_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
bfin_write_TCOUNT(cycles);
CSYNC();
return 0;
}
static void bfin_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC: {
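/* one timer period spans TPERIOD + 1 core clocks, hence the -1:
 * auto-reloading CCLK/HZ - 1 yields HZ interrupts per second */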
unsigned long tcount = ((get_cclk() / (HZ * 1)) - 1);
bfin_write_TCNTL(TMPWR);
CSYNC();
bfin_write_TPERIOD(tcount);
bfin_write_TCOUNT(tcount);
bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
CSYNC();
break;
}
case CLOCK_EVT_MODE_ONESHOT:
bfin_write_TCOUNT(0);
bfin_write_TCNTL(TMPWR | TMREN);
CSYNC();
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
bfin_write_TCNTL(0);
CSYNC();
break;
case CLOCK_EVT_MODE_RESUME:
break;
}
}
static void __init bfin_timer_init(void)
{
/* power up the timer, but don't enable it just yet */
bfin_write_TCNTL(TMPWR);
CSYNC();
/*
 * Set the TSCALE prescaler: 0 means the counter decrements
 * once per core clock (no pre-division).
 */
bfin_write_TSCALE(0);
bfin_write_TPERIOD(0);
bfin_write_TCOUNT(0);
/* flush the above core MMR writes; the timer itself is enabled
 * later, in bfin_timer_set_mode() */
CSYNC();
}
/*
 * timer_interrupt() is the core timer handler: it simply calls the
 * registered clock_event_device handler, which drives the periodic
 * tick or the one-shot/high-res timer code.
 */
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct clock_event_device clockevent_bfin = {
.name = "bfin_core_timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
.cpumask = CPU_MASK_CPU0,
.set_next_event = bfin_timer_set_next_event,
.set_mode = bfin_timer_set_mode,
};
static struct irqaction bfin_timer_irq = {
.name = "Blackfin Core Timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = timer_interrupt,
.dev_id = &clockevent_bfin,
};
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
evt->event_handler(evt);
return IRQ_HANDLED;
}
static int __init bfin_clockevent_init(void)
{
setup_irq(IRQ_CORETMR, &bfin_timer_irq);
bfin_timer_init();
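/*
 * For a clock event device mult/shift go the other way (ns -> cycles).
 * max_delta covers the full 32-bit TCOUNT range ((unsigned long)-1);
 * min_delta keeps the next event a safe distance in the future.
 */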
clockevent_bfin.mult = div_sc(get_cclk(), NSEC_PER_SEC, clockevent_bfin.shift);
clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
clockevents_register_device(&clockevent_bfin);
return 0;
}
void __init time_init(void)
{
time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
#ifdef CONFIG_RTC_DRV_BFIN
/* [#2663] hack to filter junk RTC values that would cause
* userspace to have to deal with time values greater than
* 2^31 seconds (which uClibc cannot cope with yet)
*/
if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
bfin_write_RTC_STAT(0);
}
#endif
/* Initialize xtime. From now on, xtime is updated with timer interrupts */
xtime.tv_sec = secs_since_1970;
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
bfin_clocksource_init();
bfin_clockevent_init();
}