Commit 55392c4c authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "This update provides the following changes:

   - The rework of the timer wheel which addresses the shortcomings of
     the current wheel (cascading, slow search for next expiring timer,
     etc).  That's the first major change of the wheel in almost 20
      years since Finn implemented it.

   - A large overhaul of the clocksource drivers init functions to
     consolidate the Device Tree initialization

   - Some more Y2038 updates

   - A capability fix for timerfd

   - Yet another clock chip driver

   - The usual pile of updates, comment improvements all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (130 commits)
  tick/nohz: Optimize nohz idle enter
  clockevents: Make clockevents_subsys static
  clocksource/drivers/time-armada-370-xp: Fix return value check
  timers: Implement optimization for same expiry time in mod_timer()
  timers: Split out index calculation
  timers: Only wake softirq if necessary
  timers: Forward the wheel clock whenever possible
  timers/nohz: Remove pointless tick_nohz_kick_tick() function
  timers: Optimize collect_expired_timers() for NOHZ
  timers: Move __run_timers() function
  timers: Remove set_timer_slack() leftovers
  timers: Switch to a non-cascading wheel
  timers: Reduce the CPU index space to 256k
  timers: Give a few structs and members proper names
  hlist: Add hlist_is_singular_node() helper
  signals: Use hrtimer for sigtimedwait()
  timers: Remove the deprecated mod_timer_pinned() API
  timers, net/ipv4/inet: Initialize connection request timers as pinned
  timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned
  timers, drivers/tty/metag_da: Initialize the poll timer as pinned
  ...
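Most of the diff below follows one conversion pattern: clocksource/clockevent init functions registered via CLOCKSOURCE_OF_DECLARE() change from returning void (and panicking or silently bailing out on failure) to returning an int that clocksource_probe() can check and report. A minimal sketch of the resulting shape, with a hypothetical "acme" driver and an invented 24 MHz rate standing in for the real drivers touched below:

#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Hedged sketch only; everything "acme" is made up for illustration. */
static int __init acme_timer_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base) {
		pr_err("acme-timer: unable to map registers\n");
		return -ENXIO;		/* previously: panic() or a bare return */
	}

	/* clocksource_mmio_init() already returns 0 or a negative errno */
	return clocksource_mmio_init(base, np->name, 24000000, 200, 32,
				     clocksource_mmio_readl_up);
}
CLOCKSOURCE_OF_DECLARE(acme_timer, "acme,timer", acme_timer_init);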
Oxford Semiconductor OXNAS SoCs Family RPS Timer
================================================
Required properties:
- compatible: Should be "oxsemi,ox810se-rps-timer"
- reg : Specifies base physical address and size of the registers.
- interrupts : The interrupts of the two timers
- clocks : The phandle of the timer clock source
example:

timer0: timer@200 {
	compatible = "oxsemi,ox810se-rps-timer";
	reg = <0x200 0x40>;
	clocks = <&rpsclk>;
	interrupts = <4 5>;
};
Rockchip rk3288 timer
Rockchip rk timer
Required properties:
- compatible: shall be "rockchip,rk3288-timer"
- compatible: shall be one of:
"rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368
"rockchip,rk3399-timer" - for rk3399
- reg: base address of the timer register starting with TIMERS CONTROL register
- interrupts: should contain the interrupts for Timer0
- clocks : must contain an entry for each entry in clock-names
......
......@@ -687,6 +687,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[SPARC64] tick
[X86-64] hpet,tsc
clocksource.arm_arch_timer.evtstrm=
[ARM,ARM64]
Format: <bool>
Enable/disable the eventstream feature of the ARM
architected timer so that code using WFE-based polling
loops can be debugged more effectively on production
systems.
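For example, booting with

			clocksource.arm_arch_timer.evtstrm=0

disables event stream generation even when CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y, while =1 enables it on kernels built with the option off.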
clearcpuid=BITNUM [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
......
......@@ -116,19 +116,19 @@ static struct clocksource arc_counter_gfrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init arc_cs_setup_gfrc(struct device_node *node)
static int __init arc_cs_setup_gfrc(struct device_node *node)
{
int exists = cpuinfo_arc700[0].extn.gfrc;
int ret;
if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
return;
return -ENXIO;
ret = arc_get_timer_clk(node);
if (ret)
return;
return ret;
clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
......@@ -172,25 +172,25 @@ static struct clocksource arc_counter_rtc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init arc_cs_setup_rtc(struct device_node *node)
static int __init arc_cs_setup_rtc(struct device_node *node)
{
int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
int ret;
if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
return;
return -ENXIO;
/* Local to CPU hence not usable in SMP */
if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
return;
return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
return;
return ret;
write_aux_reg(AUX_RTC_CTRL, 1);
clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
......@@ -213,23 +213,23 @@ static struct clocksource arc_counter_timer1 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init arc_cs_setup_timer1(struct device_node *node)
static int __init arc_cs_setup_timer1(struct device_node *node)
{
int ret;
/* Local to CPU hence not usable in SMP */
if (IS_ENABLED(CONFIG_SMP))
return;
return -EINVAL;
ret = arc_get_timer_clk(node);
if (ret)
return;
return ret;
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
/********** Clock Event Device *********/
......@@ -324,20 +324,28 @@ static struct notifier_block arc_timer_cpu_nb = {
/*
* clockevent setup for boot CPU
*/
static void __init arc_clockevent_setup(struct device_node *node)
static int __init arc_clockevent_setup(struct device_node *node)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int ret;
register_cpu_notifier(&arc_timer_cpu_nb);
ret = register_cpu_notifier(&arc_timer_cpu_nb);
if (ret) {
pr_err("Failed to register cpu notifier");
return ret;
}
arc_timer_irq = irq_of_parse_and_map(node, 0);
if (arc_timer_irq <= 0)
panic("clockevent: missing irq");
if (arc_timer_irq <= 0) {
pr_err("clockevent: missing irq");
return -EINVAL;
}
ret = arc_get_timer_clk(node);
if (ret)
panic("clockevent: missing clk");
if (ret) {
pr_err("clockevent: missing clk");
return ret;
}
evt->irq = arc_timer_irq;
evt->cpumask = cpumask_of(smp_processor_id());
......@@ -347,22 +355,29 @@ static void __init arc_clockevent_setup(struct device_node *node)
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
if (ret)
panic("clockevent: unable to request irq\n");
if (ret) {
pr_err("clockevent: unable to request irq\n");
return ret;
}
enable_percpu_irq(arc_timer_irq, 0);
return 0;
}
static void __init arc_of_timer_init(struct device_node *np)
static int __init arc_of_timer_init(struct device_node *np)
{
static int init_count = 0;
int ret;
if (!init_count) {
init_count = 1;
arc_clockevent_setup(np);
ret = arc_clockevent_setup(np);
} else {
arc_cs_setup_timer1(np);
ret = arc_cs_setup_timer1(np);
}
return ret;
}
CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
......
......@@ -358,10 +358,10 @@ config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
select ARCH_REQUIRE_GPIOLIB
select AUTO_ZRELADDR
select CLKSRC_MMIO
select COMMON_CLK
select CPU_ARM720T
select GENERIC_CLOCKEVENTS
select CLPS711X_TIMER
select MFD_SYSCON
select SOC_BUS
help
......
......@@ -390,7 +390,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
}
#ifdef CONFIG_OF
static void __init twd_local_timer_of_register(struct device_node *np)
static int __init twd_local_timer_of_register(struct device_node *np)
{
int err;
......@@ -410,6 +410,7 @@ static void __init twd_local_timer_of_register(struct device_node *np)
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
return err;
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
......
......@@ -89,6 +89,7 @@ config ARCH_BCM_MOBILE
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select ARCH_BCM_MOBILE_SMP if SMP
select BCM_KONA_TIMER
help
This enables support for systems based on Broadcom mobile SoCs.
......@@ -143,6 +144,7 @@ config ARCH_BCM2835
select ARM_TIMER_SP804
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
select CLKSRC_OF
select BCM2835_TIMER
select PINCTRL
select PINCTRL_BCM2835
help
......
......@@ -20,7 +20,7 @@ if ARCH_INTEGRATOR
config ARCH_INTEGRATOR_AP
bool "Support Integrator/AP and Integrator/PP2 platforms"
select CLKSRC_MMIO
select INTEGRATOR_AP_TIMER
select MIGHT_HAVE_PCI
select SERIAL_AMBA_PL010 if TTY
select SERIAL_AMBA_PL010_CONSOLE if TTY
......
......@@ -4,7 +4,7 @@ config ARCH_KEYSTONE
depends on ARM_PATCH_PHYS_VIRT
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
select CLKSRC_MMIO
select KEYSTONE_TIMER
select ARM_ERRATA_798181 if SMP
select COMMON_CLK_KEYSTONE
select ARCH_SUPPORTS_BIG_ENDIAN
......
......@@ -3,7 +3,7 @@ menuconfig ARCH_MOXART
depends on ARCH_MULTI_V4
select CPU_FA526
select ARM_DMA_MEM_BUFFERABLE
select CLKSRC_MMIO
select MOXART_TIMER
select GENERIC_IRQ_CHIP
select ARCH_REQUIRE_GPIOLIB
select PHYLIB if NETDEVICES
......
......@@ -16,7 +16,7 @@ config ARCH_MXS
bool "Freescale MXS (i.MX23, i.MX28) support"
depends on ARCH_MULTI_V5
select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select MXS_TIMER
select PINCTRL
select SOC_BUS
select SOC_IMX23
......
......@@ -7,5 +7,6 @@ config ARCH_NSPIRE
select ARM_AMBA
select ARM_VIC
select ARM_TIMER_SP804
select NSPIRE_TIMER
help
This enables support for systems using the TI-NSPIRE CPU
......@@ -28,6 +28,7 @@ config ARCH_ATLAS7
default y
select ARM_GIC
select CPU_V7
select ATLAS7_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_SMP
help
......@@ -38,6 +39,7 @@ config ARCH_PRIMA2
default y
select SIRF_IRQ
select ZONE_DMA
select PRIMA2_TIMER
help
Support for CSR SiRFSoC ARM Cortex A9 Platform
......
......@@ -4,7 +4,7 @@ menuconfig ARCH_U300
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
select CLKSRC_MMIO
select U300_TIMER
select CPU_ARM926T
select HAVE_TCM
select PINCTRL
......
......@@ -492,6 +492,14 @@
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
};
rktimer: rktimer@ff850000 {
compatible = "rockchip,rk3399-timer";
reg = <0x0 0xff850000 0x0 0x1000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
clock-names = "pclk", "timer";
};
spdif: spdif@ff870000 {
compatible = "rockchip,rk3399-spdif";
reg = <0x0 0xff870000 0x0 0x1000>;
......
......@@ -170,7 +170,7 @@ static struct irqaction timer_irqaction = {
.dev_id = &clockevent_xilinx_timer,
};
static __init void xilinx_clockevent_init(void)
static __init int xilinx_clockevent_init(void)
{
clockevent_xilinx_timer.mult =
div_sc(timer_clock_freq, NSEC_PER_SEC,
......@@ -181,6 +181,8 @@ static __init void xilinx_clockevent_init(void)
clockevent_delta2ns(1, &clockevent_xilinx_timer);
clockevent_xilinx_timer.cpumask = cpumask_of(0);
clockevents_register_device(&clockevent_xilinx_timer);
return 0;
}
static u64 xilinx_clock_read(void)
......@@ -229,8 +231,14 @@ static struct clocksource clocksource_microblaze = {
static int __init xilinx_clocksource_init(void)
{
if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
panic("failed to register clocksource");
int ret;
ret = clocksource_register_hz(&clocksource_microblaze,
timer_clock_freq);
if (ret) {
pr_err("failed to register clocksource");
return ret;
}
/* stop timer1 */
write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
......@@ -239,16 +247,16 @@ static int __init xilinx_clocksource_init(void)
write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
/* register timecounter - for ftrace support */
init_xilinx_timecounter();
return 0;
return init_xilinx_timecounter();
}
static void __init xilinx_timer_init(struct device_node *timer)
static int __init xilinx_timer_init(struct device_node *timer)
{
struct clk *clk;
static int initialized;
u32 irq;
u32 timer_num = 1;
int ret;
if (initialized)
return;
......@@ -258,7 +266,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
BUG();
return -ENXIO;
}
write_fn = timer_write32;
......@@ -271,11 +279,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
}
irq = irq_of_parse_and_map(timer, 0);
if (irq <= 0) {
pr_err("Failed to parse and map irq");
return -EINVAL;
}
of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
if (timer_num) {
pr_emerg("Please enable two timers in HW\n");
BUG();
pr_err("Please enable two timers in HW\n");
return -EINVAL;
}
pr_info("%s: irq=%d\n", timer->full_name, irq);
......@@ -297,14 +309,27 @@ static void __init xilinx_timer_init(struct device_node *timer)
freq_div_hz = timer_clock_freq / HZ;
setup_irq(irq, &timer_irqaction);
ret = setup_irq(irq, &timer_irqaction);
if (ret) {
pr_err("Failed to setup IRQ");
return ret;
}
#ifdef CONFIG_HEART_BEAT
microblaze_setup_heartbeat();
#endif
xilinx_clocksource_init();
xilinx_clockevent_init();
ret = xilinx_clocksource_init();
if (ret)
return ret;
ret = xilinx_clockevent_init();
if (ret)
return ret;
sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
return 0;
}
CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
......
......@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
return 0;
}
static void __init ralink_systick_init(struct device_node *np)
static int __init ralink_systick_init(struct device_node *np)
{
int ret;
systick.membase = of_iomap(np, 0);
if (!systick.membase)
return;
return -ENXIO;
systick_irqaction.name = np->name;
systick.dev.name = np->name;
......@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
return;
return -EINVAL;
}
clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16,
clocksource_mmio_readl_up);
if (ret)
return ret;
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
return 0;
}
CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
......@@ -206,15 +206,21 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void __init nios2_timer_get_base_and_freq(struct device_node *np,
static int __init nios2_timer_get_base_and_freq(struct device_node *np,
void __iomem **base, u32 *freq)
{
*base = of_iomap(np, 0);
if (!*base)
panic("Unable to map reg for %s\n", np->name);
if (!*base) {
pr_crit("Unable to map reg for %s\n", np->name);
return -ENXIO;
}
if (of_property_read_u32(np, "clock-frequency", freq)) {
pr_crit("Unable to get %s clock frequency\n", np->name);
return -EINVAL;
}
if (of_property_read_u32(np, "clock-frequency", freq))
panic("Unable to get %s clock frequency\n", np->name);
return 0;
}
static struct nios2_clockevent_dev nios2_ce = {
......@@ -231,17 +237,21 @@ static struct nios2_clockevent_dev nios2_ce = {
},
};
static __init void nios2_clockevent_init(struct device_node *timer)
static __init int nios2_clockevent_init(struct device_node *timer)
{
void __iomem *iobase;
u32 freq;
int irq;
int irq, ret;
nios2_timer_get_base_and_freq(timer, &iobase, &freq);
ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
if (ret)
return ret;
irq = irq_of_parse_and_map(timer, 0);
if (!irq)
panic("Unable to parse timer irq\n");
if (!irq) {
pr_crit("Unable to parse timer irq\n");
return -EINVAL;
}
nios2_ce.timer.base = iobase;
nios2_ce.timer.freq = freq;
......@@ -253,25 +263,35 @@ static __init void nios2_clockevent_init(struct device_node *timer)
/* clear pending interrupt */
timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
&nios2_ce.ced))
panic("Unable to setup timer irq\n");
ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
&nios2_ce.ced);
if (ret) {
pr_crit("Unable to setup timer irq\n");
return ret;
}
clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
return 0;
}
static __init void nios2_clocksource_init(struct device_node *timer)
static __init int nios2_clocksource_init(struct device_node *timer)
{
unsigned int ctrl;
void __iomem *iobase;
u32 freq;
int ret;
nios2_timer_get_base_and_freq(timer, &iobase, &freq);
ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
if (ret)
return ret;
nios2_cs.timer.base = iobase;
nios2_cs.timer.freq = freq;
clocksource_register_hz(&nios2_cs.cs, freq);
ret = clocksource_register_hz(&nios2_cs.cs, freq);
if (ret)
return ret;
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
......@@ -282,6 +302,8 @@ static __init void nios2_clocksource_init(struct device_node *timer)
/* Calibrate the delay loop directly */
lpj_fine = freq / HZ;
return 0;
}
/*
......@@ -289,22 +311,25 @@ static __init void nios2_clocksource_init(struct device_node *timer)
* more instances, the second one gets used as clocksource and all
* others are unused.
*/
static void __init nios2_time_init(struct device_node *timer)
static int __init nios2_time_init(struct device_node *timer)
{
static int num_called;
int ret;
switch (num_called) {
case 0:
nios2_clockevent_init(timer);
ret = nios2_clockevent_init(timer);
break;
case 1:
nios2_clocksource_init(timer);
ret = nios2_clocksource_init(timer);
break;
default:
break;
}
num_called++;
return ret;
}
void read_persistent_clock(struct timespec *ts)
......
......@@ -918,7 +918,7 @@ static void uv_heartbeat(unsigned long ignored)
uv_set_scir_bits(bits);
/* enable next timer period */
mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
static void uv_heartbeat_enable(int cpu)
......@@ -927,7 +927,7 @@ static void uv_heartbeat_enable(int cpu)
struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
setup_timer(timer, uv_heartbeat, cpu);
setup_pinned_timer(timer, uv_heartbeat, cpu);
timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
add_timer_on(timer, cpu);
uv_cpu_scir_info(cpu)->enabled = 1;
......
......@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
if (timer_pending(t)) {
if (time_before(when, t->expires))
mod_timer_pinned(t, when);
mod_timer(t, when);
} else {
t->expires = round_jiffies(when);
add_timer_on(t, smp_processor_id());
......@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned int cpu = smp_processor_id();
setup_timer(t, mce_timer_fn, cpu);
setup_pinned_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
}
......
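The x86/UV heartbeat and MCE hunks above show the pinned-timer convention this series introduces: the deprecated mod_timer_pinned() is gone, pinning is declared once with setup_pinned_timer(), and a plain mod_timer() then keeps the timer on the CPU it was first added to with add_timer_on(). A self-contained sketch under that assumption (the my_* names are invented; the callback signature is the 4.8-era unsigned long one):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_poll_timer;		/* hypothetical example timer */

static void my_poll_fn(unsigned long data)
{
	/* ... per-CPU polling work ... */
	mod_timer(&my_poll_timer, jiffies + HZ);	/* TIMER_PINNED keeps it on this CPU */
}

static void my_poll_start(int cpu)
{
	setup_pinned_timer(&my_poll_timer, my_poll_fn, 0);
	my_poll_timer.expires = jiffies + HZ;
	add_timer_on(&my_poll_timer, cpu);		/* the first arming picks the CPU */
}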
......@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
if (--ev->block)
goto out_unlock;
/*
* Not exactly a latency critical operation, set poll timer
* slack to 25% and kick event check.
*/
intv = disk_events_poll_jiffies(disk);
set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
queue_delayed_work(system_freezable_power_efficient_wq,
&ev->dwork, 0);
......
......@@ -27,6 +27,20 @@ config CLKBLD_I8253
config CLKSRC_MMIO
bool
config BCM2835_TIMER
bool "BCM2835 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the BCM2835 timer driver.
config BCM_KONA_TIMER
bool "BCM mobile timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the BCM Kona mobile timer driver.
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
......@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
help
Use the always on PRCMU Timer as clocksource
config CLPS711X_TIMER
bool "Cirrus logic timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Cirrus Logic PS711 timer.
config ATLAS7_TIMER
bool "Atlas7 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Atlas7 timer.
config MOXART_TIMER
bool "Moxart timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Moxart timer.
config MXS_TIMER
bool "Mxs timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select STMP_DEVICE
help
Enables support for the Mxs timer.
config PRIMA2_TIMER
bool "Prima2 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Prima2 timer.
config U300_TIMER
bool "U300 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on ARM
select CLKSRC_MMIO
help
Enables support for the U300 timer.
config NSPIRE_TIMER
bool "NSpire timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Nspire timer.
config KEYSTONE_TIMER
bool "Keystone timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on ARM || ARM64
select CLKSRC_MMIO
help
Enables support for the Keystone timer.
config INTEGRATOR_AP_TIMER
bool "Integrator-ap timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Integrator-ap timer.
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
......@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
select CLKSRC_ACPI if ACPI
config ARM_ARCH_TIMER_EVTSTREAM
bool "Support for ARM architected timer event stream generation"
bool "Enable ARM architected timer event stream generation by default"
default y if ARM_ARCH_TIMER
depends on ARM_ARCH_TIMER
help
This option enables support for event stream generation based on
the ARM architected timer. It is used for waking up CPUs executing
the wfe instruction at a frequency represented as a power-of-2
divisor of the clock rate.
This option enables support by default for event stream generation
based on the ARM architected timer. It is used for waking up CPUs
executing the wfe instruction at a frequency represented as a
power-of-2 divisor of the clock rate. The behaviour can also be
overridden on the command line using the
clocksource.arm_arch_timer.evtstrm parameter.
The main use of the event stream is wfe-based timeouts of userspace
locking implementations. It might also be useful for imposing timeout
on wfe to safeguard against any programming errors in case an expected
......@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
hardware anomalies of missing events.
config ARM_GLOBAL_TIMER
bool
bool "Support for the ARM global timer" if COMPILE_TEST
select CLKSRC_OF if OF
depends on ARM
help
This options enables support for the ARM global timer unit
......@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
Use ARM global timer clock source as sched_clock
config ARMV7M_SYSTICK
bool
bool "Support for the ARMv7M system time" if COMPILE_TEST
select CLKSRC_OF if OF
select CLKSRC_MMIO
help
......@@ -254,9 +337,12 @@ config ATMEL_PIT
def_bool SOC_AT91SAM9 || SOC_SAMA5
config ATMEL_ST
bool
bool "Atmel ST timer support" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select MFD_SYSCON
help
Support for the Atmel ST timer.
config CLKSRC_METAG_GENERIC
def_bool y if METAG
......@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
......@@ -293,6 +379,14 @@ config VF_PIT_TIMER
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
config OXNAS_RPS_TIMER
bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select CLKSRC_MMIO
help
This enables support for the Oxford Semiconductor OXNAS RPS timers.
config SYS_SUPPORTS_SH_CMT
bool
......@@ -361,8 +455,8 @@ config CLKSRC_QCOM
Qualcomm SoCs.
config CLKSRC_VERSATILE
bool "ARM Versatile (Express) reference platforms clock source"
depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
......
......@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o
obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o
obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_ARCH_U300) += timer-u300.o
obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
......@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
......@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
......
......@@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
{
return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
* Architected system timer support.
*/
......@@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk)
enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
......@@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
return needs_probing;
}
static void __init arch_timer_common_init(void)
static int __init arch_timer_common_init(void)
{
unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
/* Wait until both nodes are probed if we have two timers */
if ((arch_timers_present & mask) != mask) {
if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
return;
return 0;
if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
return;
return 0;
}
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
arch_timer_arch_init();
return arch_timer_arch_init();
}
static void __init arch_timer_init(void)
static int __init arch_timer_init(void)
{
int ret;
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
......@@ -739,23 +748,30 @@ static void __init arch_timer_init(void)
if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
return;
return -EINVAL;
}
}
arch_timer_register();
arch_timer_common_init();
ret = arch_timer_register();
if (ret)
return ret;
ret = arch_timer_common_init();
if (ret)
return ret;
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
return 0;
}
static void __init arch_timer_of_init(struct device_node *np)
static int __init arch_timer_of_init(struct device_node *np)
{
int i;
if (arch_timers_present & ARCH_CP15_TIMER) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
return;
return 0;
}
arch_timers_present |= ARCH_CP15_TIMER;
......@@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
arch_timer_init();
return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static void __init arch_timer_mem_init(struct device_node *np)
static int __init arch_timer_mem_init(struct device_node *np)
{
struct device_node *frame, *best_frame = NULL;
void __iomem *cntctlbase, *base;
unsigned int irq;
unsigned int irq, ret = -EINVAL;
u32 cnttidr;
arch_timers_present |= ARCH_MEM_TIMER;
cntctlbase = of_iomap(np, 0);
if (!cntctlbase) {
pr_err("arch_timer: Can't find CNTCTLBase\n");
return;
return -ENXIO;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
......@@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
best_frame = of_node_get(frame);
}
ret = -ENXIO;
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
......@@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
else
irq = irq_of_parse_and_map(best_frame, 0);
ret = -EINVAL;
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
......@@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
arch_timer_detect_rate(base, np);
arch_timer_mem_register(base, irq);
arch_timer_common_init();
ret = arch_timer_mem_register(base, irq);
if (ret)
goto out;
return arch_timer_common_init();
out:
iounmap(cntctlbase);
of_node_put(best_frame);
return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
......
......@@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
static void __init gt_clocksource_init(void)
static int __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
......@@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void)
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
clocksource_register_hz(&gt_clocksource, gt_clk_rate);
return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
......@@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = {
.notifier_call = gt_cpu_notify,
};
static void __init global_timer_of_register(struct device_node *np)
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
int err = 0;
......@@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np)
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
&& (read_cpuid_id() & 0xf0000f) < 0x200000) {
pr_warn("global-timer: non support for this cpu version.\n");
return;
return -ENOSYS;
}
gt_ppi = irq_of_parse_and_map(np, 0);
if (!gt_ppi) {
pr_warn("global-timer: unable to parse irq\n");
return;
return -EINVAL;
}
gt_base = of_iomap(np, 0);
if (!gt_base) {
pr_warn("global-timer: invalid base address\n");
return;
return -ENXIO;
}
gt_clk = of_clk_get(np, 0);
......@@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np)
}
/* Immediately configure the timer on the boot CPU */
gt_clocksource_init();
gt_clockevents_init(this_cpu_ptr(gt_evt));
err = gt_clocksource_init();
if (err)
goto out_irq;
err = gt_clockevents_init(this_cpu_ptr(gt_evt));
if (err)
goto out_irq;
gt_delay_timer_init();
return;
return 0;
out_irq:
free_percpu_irq(gt_ppi, gt_evt);
......@@ -347,6 +353,8 @@ static void __init global_timer_of_register(struct device_node *np)
out_unmap:
iounmap(gt_base);
WARN(err, "ARM Global timer register failed (%d)\n", err);
return err;
}
/* Only tested on r2p2 and r3p0 */
......
......@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
......@@ -21,7 +22,7 @@
#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
static void __init system_timer_of_register(struct device_node *np)
static int __init system_timer_of_register(struct device_node *np)
{
struct clk *clk = NULL;
void __iomem *base;
......@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_warn("system-timer: invalid base address\n");
return;
return -ENXIO;
}
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
if (IS_ERR(clk))
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
goto out_unmap;
}
ret = clk_prepare_enable(clk);
if (ret)
goto out_clk_put;
rate = clk_get_rate(clk);
if (!rate)
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
}
}
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
......@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
pr_info("ARM System timer initialized as clocksource\n");
return;
return 0;
out_clk_disable:
clk_disable_unprepare(clk);
......@@ -73,6 +78,8 @@ static void __init system_timer_of_register(struct device_node *np)
out_unmap:
iounmap(base);
pr_warn("ARM System timer register failed (%d)\n", ret);
return ret;
}
CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
......
......@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
* Timer initialization
* ---------------------------------------------------------------------------
*/
static void __init asm9260_timer_init(struct device_node *np)
static int __init asm9260_timer_init(struct device_node *np)
{
int irq;
struct clk *clk;
......@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
unsigned long rate;
priv.base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(priv.base))
panic("%s: unable to map resource", np->name);
if (IS_ERR(priv.base)) {
pr_err("%s: unable to map resource", np->name);
return PTR_ERR(priv.base);
}
clk = of_clk_get(np, 0);
ret = clk_prepare_enable(clk);
if (ret)
panic("Failed to enable clk!\n");
if (ret) {
pr_err("Failed to enable clk!\n");
return ret;
}
irq = irq_of_parse_and_map(np, 0);
ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
DRIVER_NAME, &event_dev);
if (ret)
panic("Failed to setup irq!\n");
if (ret) {
pr_err("Failed to setup irq!\n");
return ret;
}
/* set all timers for count-up */
writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
......@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
event_dev.cpumask = cpumask_of(0);
clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
return 0;
}
CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
asm9260_timer_init);
......@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
}
}
static void __init bcm2835_timer_init(struct device_node *node)
static int __init bcm2835_timer_init(struct device_node *node)
{
void __iomem *base;
u32 freq;
int irq;
int irq, ret;
struct bcm2835_timer *timer;
base = of_iomap(node, 0);
if (!base)
panic("Can't remap registers");
if (!base) {
pr_err("Can't remap registers");
return -ENXIO;
}
if (of_property_read_u32(node, "clock-frequency", &freq))
panic("Can't read clock-frequency");
ret = of_property_read_u32(node, "clock-frequency", &freq);
if (ret) {
pr_err("Can't read clock-frequency");
return ret;
}
system_clock = base + REG_COUNTER_LO;
sched_clock_register(bcm2835_sched_read, 32, freq);
......@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
freq, 300, 32, clocksource_mmio_readl_up);
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
if (irq <= 0)
panic("Can't parse IRQ");
if (irq <= 0) {
pr_err("Can't parse IRQ");
return -EINVAL;
}
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
panic("Can't allocate timer struct\n");
if (!timer) {
pr_err("Can't allocate timer struct\n");
return -ENOMEM;
}
timer->control = base + REG_CONTROL;
timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
......@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
timer->act.dev_id = timer;
timer->act.handler = bcm2835_time_interrupt;
if (setup_irq(irq, &timer->act))
panic("Can't set up timer IRQ\n");
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
return ret;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
pr_info("bcm2835: system timer (irq = %d)\n", irq);
return 0;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
......@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
......@@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = {
.handler = kona_timer_interrupt,
};
static void __init kona_timer_init(struct device_node *node)
static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
if (!of_device_is_available(node)) {
pr_info("Kona Timer v1 marked as disabled in device tree\n");
return;
}
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
......@@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node)
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency");
return;
return -EINVAL;
}
/* Setup IRQ numbers */
......@@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node)
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
return 0;
}
CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
......
......@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
u32 timer_width)
{
struct ttc_timer_clocksource *ttccs;
int err;
ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
if (WARN_ON(!ttccs))
return;
if (!ttccs)
return -ENOMEM;
ttccs->ttc.clk = clk;
err = clk_prepare_enable(ttccs->ttc.clk);
if (WARN_ON(err)) {
if (err) {
kfree(ttccs);
return;
return err;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
......@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
if (clk_notifier_register(ttccs->ttc.clk,
&ttccs->ttc.clk_rate_change_nb))
err = clk_notifier_register(ttccs->ttc.clk,
&ttccs->ttc.clk_rate_change_nb);
if (err)
pr_warn("Unable to register clock notifier.\n");
ttccs->ttc.base_addr = base;
......@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
if (WARN_ON(err)) {
if (err) {
kfree(ttccs);
return;
return err;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
sched_clock_register(ttc_sched_clock_read, timer_width,
ttccs->ttc.freq / PRESCALE);
return 0;
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
......@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
}
}
static void __init ttc_setup_clockevent(struct clk *clk,
void __iomem *base, u32 irq)
static int __init ttc_setup_clockevent(struct clk *clk,
void __iomem *base, u32 irq)
{
struct ttc_timer_clockevent *ttcce;
int err;
ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
if (WARN_ON(!ttcce))
return;
if (!ttcce)
return -ENOMEM;
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
if (WARN_ON(err)) {
if (err) {
kfree(ttcce);
return;
return err;
}
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
ttcce->ttc.clk_rate_change_nb.next = NULL;
if (clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb))
err = clk_notifier_register(ttcce->ttc.clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
return err;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
......@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
if (WARN_ON(err)) {
if (err) {
kfree(ttcce);
return;
return err;
}
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
}
/**
......@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
* Initializes the timer hardware and registers the clock source and clock event
* timers with the Linux kernel timer framework
*/
static void __init ttc_timer_init(struct device_node *timer)
static int __init ttc_timer_init(struct device_node *timer)
{
unsigned int irq;
void __iomem *timer_baseaddr;
struct clk *clk_cs, *clk_ce;
static int initialized;
int clksel;
int clksel, ret;
u32 timer_width = 16;
if (initialized)
return;
return 0;
initialized = 1;
......@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
BUG();
return -ENXIO;
}
irq = irq_of_parse_and_map(timer, 1);
if (irq <= 0) {
pr_err("ERROR: invalid interrupt number\n");
BUG();
return -EINVAL;
}
of_property_read_u32(timer, "timer-width", &timer_width);
......@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
pr_err("ERROR: timer input clock not found\n");
BUG();
return PTR_ERR(clk_cs);
}
clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
......@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
BUG();
return PTR_ERR(clk_ce);
}
ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
if (ret)
return ret;
ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
if (ret)
return ret;
pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
return 0;
}
CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
......@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
#endif
static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
......@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
clksrc_dbx500_prcmu_init);
......@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
{
struct device_node *np;
const struct of_device_id *match;
of_init_fn_1 init_func;
of_init_fn_1_ret init_func_ret;
unsigned clocksources = 0;
int ret;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
continue;
init_func = match->data;
init_func(np);
init_func_ret = match->data;
ret = init_func_ret(np);
if (ret) {
pr_err("Failed to initialize '%s': %d",
of_node_full_name(np), ret);
continue;
}
clocksources++;
}
......
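clocksource_probe() (hunk above) now checks each init function's return value, so match->data is treated as an int-returning callback instead of the old void-returning one. Roughly, hedged as a from-memory sketch rather than a verbatim quote of include/linux/of.h and include/linux/clocksource.h:

/* old style: no way to report failure back to clocksource_probe() */
typedef void (*of_init_fn_1)(struct device_node *);

/* new style used throughout this series: 0 on success, -errno on failure */
typedef int (*of_init_fn_1_ret)(struct device_node *);

/* CLOCKSOURCE_OF_DECLARE() is assumed to be wired to the _ret variant, e.g.: */
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
	OF_DECLARE_1_RET(clksrc, name, compat, fn)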
......@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
return 0;
}
static void __init st_clksrc_of_register(struct device_node *np)
static int __init st_clksrc_of_register(struct device_node *np)
{
int ret;
uint32_t mode;
......@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
return;
return ret;
}
/* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_CLKSRC)
return;
return 0;
ddata.base = of_iomap(np, 0);
if (!ddata.base) {
pr_err("clksrc-st-lpc: Unable to map iomem\n");
return;
return -ENXIO;
}
if (st_clksrc_setup_clk(np)) {
ret = st_clksrc_setup_clk(np);
if (ret) {
iounmap(ddata.base);
return;
return ret;
}
if (st_clksrc_init()) {
ret = st_clksrc_init();
if (ret) {
clk_disable_unprepare(ddata.clk);
clk_put(ddata.clk);
iounmap(ddata.base);
return;
return ret;
}
pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
clk_get_rate(ddata.clk));
return ret;
}
CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
......@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
}
#ifdef CONFIG_CLKSRC_OF
static void __init clps711x_timer_init(struct device_node *np)
static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
......@@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np)
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
BUG_ON(_clps711x_clksrc_init(clock, base));
break;
return _clps711x_clksrc_init(clock, base);
case CLPS711X_CLKSRC_CLOCKEVENT:
BUG_ON(_clps711x_clkevt_init(clock, base, irq));
break;
return _clps711x_clkevt_init(clock, base, irq);
default:
break;
return -EINVAL;
}
}
CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
......
......@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
#endif
static int num_called;
static void __init dw_apb_timer_init(struct device_node *timer)
static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
......@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
}
num_called++;
return 0;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
......
......@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
return exynos4_read_count_32();
}
static void __init exynos4_clocksource_init(void)
static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
......@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
panic("%s: can't register clocksource\n", mct_frc.name);
sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
return 0;
}
static void exynos4_mct_comp0_stop(void)
......@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
.dev_id = &mct_comp_device,
};
static void exynos4_clockevent_init(void)
static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
......@@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
.notifier_call = exynos4_mct_cpu_notify,
};
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
int err, cpu;
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
......@@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
/* Immediately configure the timer on the boot CPU */
exynos4_local_timer_setup(mevt);
return;
return 0;
out_irq:
free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
return err;
}
static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
u32 nr_irqs, i;
int ret;
mct_int_type = int_type;
......@@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
exynos4_timer_resources(np, of_iomap(np, 0));
exynos4_clocksource_init();
exynos4_clockevent_init();
ret = exynos4_timer_resources(np, of_iomap(np, 0));
if (ret)
return ret;
ret = exynos4_clocksource_init();
if (ret)
return ret;
return exynos4_clockevent_init();
}
static void __init mct_init_spi(struct device_node *np)
static int __init mct_init_spi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_SPI);
}
static void __init mct_init_ppi(struct device_node *np)
static int __init mct_init_ppi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_PPI);
}
......
......@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
return 0;
}
static void __init ftm_timer_init(struct device_node *np)
static int __init ftm_timer_init(struct device_node *np)
{
unsigned long freq;
int irq;
int ret, irq;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return;
return -ENOMEM;
ret = -ENXIO;
priv->clkevt_base = of_iomap(np, 0);
if (!priv->clkevt_base) {
pr_err("ftm: unable to map event timer registers\n");
......@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
goto err;
}
ret = -EINVAL;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
......@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
if (!freq)
goto err;
if (ftm_calc_closest_round_cyc(freq))
ret = ftm_calc_closest_round_cyc(freq);
if (ret)
goto err;
if (ftm_clocksource_init(freq))
ret = ftm_clocksource_init(freq);
if (ret)
goto err;
if (ftm_clockevent_init(freq, irq))
ret = ftm_clockevent_init(freq, irq);
if (ret)
goto err;
return;
return 0;
err:
kfree(priv);
return ret;
}
CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
......@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
#define REG_CH 0
#define REG_COMM 1
static void __init h8300_16timer_init(struct device_node *node)
static int __init h8300_16timer_init(struct device_node *node)
{
void __iomem *base[2];
int ret, irq;
......@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
return;
return PTR_ERR(clk);
}
ret = -ENXIO;
base[REG_CH] = of_iomap(node, 0);
if (!base[REG_CH]) {
pr_err("failed to map registers for clocksource\n");
......@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
goto unmap_ch;
}
ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
......@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
clocksource_register_hz(&timer16_priv.cs,
clk_get_rate(clk) / 8);
return;
return 0;
unmap_comm:
iounmap(base[REG_COMM]);
......@@ -182,6 +184,8 @@ static void __init h8300_16timer_init(struct device_node *node)
iounmap(base[REG_CH]);
free_clk:
clk_put(clk);
return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);
CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
h8300_16timer_init);
......@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
},
};
static void __init h8300_8timer_init(struct device_node *node)
static int __init h8300_8timer_init(struct device_node *node)
{
void __iomem *base;
int irq;
int irq, ret;
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clockevent\n");
return;
return PTR_ERR(clk);
}
ret = -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
goto free_clk;
}
ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
......@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
clockevents_config_and_register(&timer8_priv.ced,
timer8_priv.rate, 1, 0x0000ffff);
return;
return 0;
unmap_reg:
iounmap(base);
free_clk:
clk_put(clk);
return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
......@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
#define CH_L 0
#define CH_H 1
static void __init h8300_tpu_init(struct device_node *node)
static int __init h8300_tpu_init(struct device_node *node)
{
void __iomem *base[2];
struct clk *clk;
int ret = -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
return;
return PTR_ERR(clk);
}
base[CH_L] = of_iomap(node, CH_L);
......@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
tpu_priv.mapbase1 = base[CH_L];
tpu_priv.mapbase2 = base[CH_H];
clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
return;
return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
unmap_L:
iounmap(base[CH_H]);
free_clk:
clk_put(clk);
return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
......@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
.dev_id = &meson6_clockevent,
};
static void __init meson6_timer_init(struct device_node *node)
static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
int ret, irq;
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
if (IS_ERR(timer_base))
panic("Can't map registers");
if (IS_ERR(timer_base)) {
pr_err("Can't map registers");
return -ENXIO;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0)
panic("Can't parse IRQ");
if (irq <= 0) {
pr_err("Can't parse IRQ");
return -EINVAL;
}
/* Set 1us for timer E */
val = readl(timer_base + TIMER_ISA_MUX);
......@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
meson6_clkevt_time_stop(CED_ID);
ret = setup_irq(irq, &meson6_timer_irq);
if (ret)
if (ret) {
pr_warn("failed to setup irq %d\n", irq);
return ret;
}
meson6_clockevent.cpumask = cpu_possible_mask;
meson6_clockevent.irq = irq;
clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
1, 0xfffe);
return 0;
}
CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
meson6_timer_init);
......@@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = {
.archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
};
static void __init __gic_clocksource_init(void)
static int __init __gic_clocksource_init(void)
{
int ret;
......@@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void)
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("GIC: Unable to register clocksource\n");
return ret;
}
void __init gic_clocksource_init(unsigned int frequency)
......@@ -179,31 +181,35 @@ static void __init gic_clocksource_of_init(struct device_node *node)
struct clk *clk;
int ret;
if (WARN_ON(!gic_present || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")))
return;
if (!gic_present || !node->parent ||
!of_device_is_compatible(node->parent, "mti,gic")) {
pr_warn("No DT definition for the mips gic driver");
return -ENXIO;
}
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
if (clk_prepare_enable(clk) < 0) {
pr_err("GIC failed to enable clock\n");
clk_put(clk);
return;
return PTR_ERR(clk);
}
gic_frequency = clk_get_rate(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
return;
return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
return;
return -EINVAL;
}
__gic_clocksource_init();
ret = __gic_clocksource_init();
if (ret)
return ret;
ret = gic_clockevent_init();
if (!ret && !IS_ERR(clk)) {
......@@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
gic_start_count();
return 0;
}
CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
......@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
.dev_id = &moxart_clockevent,
};
static void __init moxart_timer_init(struct device_node *node)
static int __init moxart_timer_init(struct device_node *node)
{
int ret, irq;
unsigned long pclk;
struct clk *clk;
base = of_iomap(node, 0);
if (!base)
panic("%s: of_iomap failed\n", node->full_name);
if (!base) {
pr_err("%s: of_iomap failed\n", node->full_name);
return -ENXIO;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0)
panic("%s: irq_of_parse_and_map failed\n", node->full_name);
if (irq <= 0) {
pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
return -EINVAL;
}
ret = setup_irq(irq, &moxart_timer_irq);
if (ret)
panic("%s: setup_irq failed\n", node->full_name);
if (ret) {
pr_err("%s: setup_irq failed\n", node->full_name);
return ret;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
panic("%s: of_clk_get failed\n", node->full_name);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", node->full_name);
return PTR_ERR(clk);
}
pclk = clk_get_rate(clk);
if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
"moxart_timer", pclk, 200, 32,
clocksource_mmio_readl_down))
panic("%s: clocksource_mmio_init failed\n", node->full_name);
ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
"moxart_timer", pclk, 200, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
return ret;
}
clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
......@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
*/
clockevents_config_and_register(&moxart_clockevent, pclk,
0x4, 0xfffffffe);
return 0;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
......@@ -250,7 +250,7 @@ static int __init mps2_clocksource_init(struct device_node *np)
return ret;
}
static void __init mps2_timer_init(struct device_node *np)
static int __init mps2_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret;
......@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
return;
return 0;
}
}
......@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
return;
return 0;
}
}
return 0;
}
CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
......@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
evt->gpt_base + GPT_IRQ_EN_REG);
}
static void __init mtk_timer_init(struct device_node *node)
static int __init mtk_timer_init(struct device_node *node)
{
struct mtk_clock_event_device *evt;
struct resource res;
......@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
return;
return -ENOMEM;
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
......@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
mtk_timer_enable_irq(evt, GPT_CLK_EVT);
return;
return 0;
err_clk_disable:
clk_disable_unprepare(clk);
......@@ -262,5 +262,7 @@ static void __init mtk_timer_init(struct device_node *node)
release_mem_region(res.start, resource_size(&res));
err_kzalloc:
kfree(evt);
return -EINVAL;
}
CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
......@@ -31,8 +31,6 @@
#include <linux/stmp_device.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
......@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
static void __init mxs_timer_init(struct device_node *np)
static int __init mxs_timer_init(struct device_node *np)
{
struct clk *timer_clk;
int irq;
int irq, ret;
mxs_timrot_base = of_iomap(np, 0);
WARN_ON(!mxs_timrot_base);
......@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
timer_clk = of_clk_get(np, 0);
if (IS_ERR(timer_clk)) {
pr_err("%s: failed to get clk\n", __func__);
return;
return PTR_ERR(timer_clk);
}
clk_prepare_enable(timer_clk);
ret = clk_prepare_enable(timer_clk);
if (ret)
return ret;
/*
* Initialize timers to a known state
......@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* init and register the timer to the framework */
mxs_clocksource_init(timer_clk);
mxs_clockevent_init(timer_clk);
ret = mxs_clocksource_init(timer_clk);
if (ret)
return ret;
ret = mxs_clockevent_init(timer_clk);
if (ret)
return ret;
/* Make irqs happen */
irq = irq_of_parse_and_map(np, 0);
setup_irq(irq, &mxs_timer_irq);
if (irq <= 0)
return -EINVAL;
return setup_irq(irq, &mxs_timer_irq);
}
CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
......@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
static void __init nmdk_timer_init(void __iomem *base, int irq,
static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
unsigned long rate;
int ret;
mtu_base = base;
......@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
/* Timer 0 is the free running clocksource */
nmdk_clksrc_reset();
if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
rate, 200, 32, clocksource_mmio_readl_down))
pr_err("timer: failed to initialize clock source %s\n",
"mtu_0");
ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
rate, 200, 32, clocksource_mmio_readl_down);
if (ret) {
pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
return ret;
}
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
sched_clock_register(nomadik_read_sched_clock, 32, rate);
......@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
register_current_timer_delay(&mtu_delay_timer);
return 0;
}
static void __init nmdk_timer_of_init(struct device_node *node)
static int __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
struct clk *clk;
......@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
int irq;
base = of_iomap(node, 0);
if (!base)
panic("Can't remap registers");
if (!base) {
pr_err("Can't remap registers");
return -ENXIO;
}
pclk = of_clk_get_by_name(node, "apb_pclk");
if (IS_ERR(pclk))
panic("could not get apb_pclk");
if (IS_ERR(pclk)) {
pr_err("could not get apb_pclk");
return PTR_ERR(pclk);
}
clk = of_clk_get_by_name(node, "timclk");
if (IS_ERR(clk))
panic("could not get timclk");
if (IS_ERR(clk)) {
pr_err("could not get timclk");
return PTR_ERR(clk);
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0)
panic("Can't parse IRQ");
if (irq <= 0) {
pr_err("Can't parse IRQ");
return -EINVAL;
}
nmdk_timer_init(base, irq, pclk, clk);
return nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
......@@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = {
.dev_id = &ckevt_pxa_osmr0,
};
static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
int ret;
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
......@@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
setup_irq(irq, &pxa_ost0_irq);
ret = setup_irq(irq, &pxa_ost0_irq);
if (ret) {
pr_err("Failed to setup irq");
return ret;
}
ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to init clocksource");
return ret;
}
clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
32, clocksource_mmio_readl_up);
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
MIN_OSCR_DELTA * 2, 0x7fffffff);
return 0;
}
static void __init pxa_timer_dt_init(struct device_node *np)
static int __init pxa_timer_dt_init(struct device_node *np)
{
struct clk *clk;
int irq;
int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base)
panic("%s: unable to map resource\n", np->name);
if (!timer_base) {
pr_err("%s: unable to map resource\n", np->name);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_crit("%s: unable to get clk\n", np->name);
return;
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_crit("Failed to prepare clock");
return ret;
}
clk_prepare_enable(clk);
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
return;
return -EINVAL;
}
pxa_timer_common_init(irq, clk_get_rate(clk));
return pxa_timer_common_init(irq, clk_get_rate(clk));
}
CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
......
......@@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = {
.read_current_timer = msm_read_current_timer,
};
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
struct clocksource *cs = &msm_clocksource;
......@@ -218,12 +218,14 @@ static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
return res;
}
static void __init msm_dt_timer_init(struct device_node *np)
static int __init msm_dt_timer_init(struct device_node *np)
{
u32 freq;
int irq;
int irq, ret;
struct resource res;
u32 percpu_offset;
void __iomem *base;
......@@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to map event base\n");
return;
return -ENXIO;
}
/* We use GPT0 for the clockevent */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("Can't get irq\n");
return;
return -EINVAL;
}
/* We use CPU0's DGT for the clocksource */
if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
percpu_offset = 0;
if (of_address_to_resource(np, 0, &res)) {
ret = of_address_to_resource(np, 0, &res);
if (ret) {
pr_err("Failed to parse DGT resource\n");
return;
return ret;
}
cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
if (!cpu0_base) {
pr_err("Failed to map source base\n");
return;
return -EINVAL;
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
pr_err("Unknown frequency\n");
return;
return -EINVAL;
}
event_base = base + 0x4;
......@@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
msm_timer_init(freq, 32, irq, !!percpu_offset);
return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
......@@ -19,7 +19,8 @@
#define TIMER_LOAD_COUNT0 0x00
#define TIMER_LOAD_COUNT1 0x04
#define TIMER_CONTROL_REG 0x10
#define TIMER_CONTROL_REG3288 0x10
#define TIMER_CONTROL_REG3399 0x1c
#define TIMER_INT_STATUS 0x18
#define TIMER_DISABLE 0x0
......@@ -31,6 +32,7 @@
struct bc_timer {
struct clock_event_device ce;
void __iomem *base;
void __iomem *ctrl;
u32 freq;
};
......@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
return rk_timer(ce)->base;
}
static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
{
return rk_timer(ce)->ctrl;
}
static inline void rk_timer_disable(struct clock_event_device *ce)
{
writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
}
static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
{
writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
rk_base(ce) + TIMER_CONTROL_REG);
rk_ctrl(ce));
}
static void rk_timer_update_counter(unsigned long cycles,
......@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void __init rk_timer_init(struct device_node *np)
static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
{
struct clock_event_device *ce = &bc_timer.ce;
struct clk *timer_clk;
struct clk *pclk;
int ret, irq;
int ret = -EINVAL, irq;
bc_timer.base = of_iomap(np, 0);
if (!bc_timer.base) {
pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
return;
return -ENXIO;
}
bc_timer.ctrl = bc_timer.base + ctrl_reg;
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
ret = PTR_ERR(pclk);
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
if (clk_prepare_enable(pclk)) {
ret = clk_prepare_enable(pclk);
if (ret) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
ret = PTR_ERR(timer_clk);
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
goto out_timer_clk;
}
if (clk_prepare_enable(timer_clk)) {
ret = clk_prepare_enable(timer_clk);
if (ret) {
pr_err("Failed to enable timer clock\n");
goto out_timer_clk;
}
......@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
goto out_irq;
}
ce->name = TIMER_NAME;
ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ;
ce->set_next_event = rk_timer_set_next_event;
ce->set_state_shutdown = rk_timer_shutdown;
ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = irq;
ce->cpumask = cpumask_of(0);
ce->cpumask = cpu_possible_mask;
ce->rating = 250;
rk_timer_interrupt_clear(ce);
......@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
return;
return 0;
out_irq:
clk_disable_unprepare(timer_clk);
......@@ -177,6 +191,21 @@ static void __init rk_timer_init(struct device_node *np)
clk_disable_unprepare(pclk);
out_unmap:
iounmap(bc_timer.base);
return ret;
}
static int __init rk3288_timer_init(struct device_node *np)
{
return rk_timer_init(np, TIMER_CONTROL_REG3288);
}
static int __init rk3399_timer_init(struct device_node *np)
{
return rk_timer_init(np, TIMER_CONTROL_REG3399);
}
CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
rk3288_timer_init);
CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
rk3399_timer_init);
......@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = __raw_readl(pwm.base + REG_TCON);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_START(channel);
__raw_writel(tcon, pwm.base + REG_TCON);
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
......@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = __raw_readl(pwm.base + REG_TCON);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
tcon |= TCON_MANUALUPDATE(tcon_chan);
__raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
__raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
__raw_writel(tcon, pwm.base + REG_TCON);
writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
......@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
spin_lock_irqsave(&samsung_pwm_lock, flags);
tcon = __raw_readl(pwm.base + REG_TCON);
tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(channel);
tcon |= TCON_START(channel);
......@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
else
tcon &= ~TCON_AUTORELOAD(channel);
__raw_writel(tcon, pwm.base + REG_TCON);
writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
......@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
return samsung_clocksource_read(NULL);
}
static void __init samsung_clocksource_init(void)
static int __init samsung_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
int ret;
pclk = clk_get_rate(pwm.timerclk);
......@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
if (ret)
panic("samsung_clocksource_timer: can't register clocksource\n");
return clocksource_register_hz(&samsung_clocksource, clock_rate);
}
static void __init samsung_timer_resources(void)
......@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
/*
* PWM master driver
*/
static void __init _samsung_pwm_clocksource_init(void)
static int __init _samsung_pwm_clocksource_init(void)
{
u8 mask;
int channel;
mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
channel = fls(mask) - 1;
if (channel < 0)
panic("failed to find PWM channel for clocksource");
if (channel < 0) {
pr_crit("failed to find PWM channel for clocksource");
return -EINVAL;
}
pwm.source_id = channel;
mask &= ~(1 << channel);
channel = fls(mask) - 1;
if (channel < 0)
panic("failed to find PWM channel for clock event");
if (channel < 0) {
pr_crit("failed to find PWM channel for clock event");
return -EINVAL;
}
pwm.event_id = channel;
samsung_timer_resources();
samsung_clockevent_init();
samsung_clocksource_init();
return samsung_clocksource_init();
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
......@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
}
#ifdef CONFIG_CLKSRC_OF
static void __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
static int __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
{
struct property *prop;
const __be32 *cur;
......@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
return;
return -ENXIO;
}
pwm.timerclk = of_clk_get_by_name(np, "timers");
if (IS_ERR(pwm.timerclk))
panic("failed to get timers clock for timer");
if (IS_ERR(pwm.timerclk)) {
pr_crit("failed to get timers clock for timer");
return PTR_ERR(pwm.timerclk);
}
_samsung_pwm_clocksource_init();
return _samsung_pwm_clocksource_init();
}
static const struct samsung_pwm_variant s3c24xx_variant = {
......@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
.tclk_mask = (1 << 4),
};
static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
samsung_pwm_alloc(np, &s3c24xx_variant);
return samsung_pwm_alloc(np, &s3c24xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
......@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
.tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
};
static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
samsung_pwm_alloc(np, &s3c64xx_variant);
return samsung_pwm_alloc(np, &s3c64xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
......@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
.tclk_mask = 0,
};
static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
samsung_pwm_alloc(np, &s5p64x0_variant);
return samsung_pwm_alloc(np, &s5p64x0_variant);
}
CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
......@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
.tclk_mask = (1 << 5),
};
static void __init s5p_pwm_clocksource_init(struct device_node *np)
static int __init s5p_pwm_clocksource_init(struct device_node *np)
{
samsung_pwm_alloc(np, &s5p_variant);
return samsung_pwm_alloc(np, &s5p_variant);
}
CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
......@@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void)
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
static void __init sun4i_timer_init(struct device_node *node)
static int __init sun4i_timer_init(struct device_node *node)
{
unsigned long rate = 0;
struct clk *clk;
......@@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node)
u32 val;
timer_base = of_iomap(node, 0);
if (!timer_base)
panic("Can't map registers");
if (!timer_base) {
pr_crit("Can't map registers");
return -ENXIO;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0)
panic("Can't parse IRQ");
if (irq <= 0) {
pr_crit("Can't parse IRQ");
return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
panic("Can't get timer clock");
clk_prepare_enable(clk);
if (IS_ERR(clk)) {
pr_crit("Can't get timer clock");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Failed to prepare clock");
return ret;
}
rate = clk_get_rate(clk);
......@@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node)
of_machine_is_compatible("allwinner,sun5i-a10s"))
sched_clock_register(sun4i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
rate, 350, 32, clocksource_mmio_readl_down);
ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
rate, 350, 32, clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to register clocksource");
return ret;
}
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
......@@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_SYNC_TICKS, 0xffffffff);
ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
if (ret) {
pr_err("failed to setup irq %d\n", irq);
return ret;
}
/* Enable timer0 interrupt */
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
return ret;
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
......@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
return read_xtal_counter();
}
static void __init tango_clocksource_init(struct device_node *np)
static int __init tango_clocksource_init(struct device_node *np)
{
struct clk *clk;
int xtal_freq, ret;
......@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
pr_err("%s: invalid address\n", np->full_name);
return;
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: invalid clock\n", np->full_name);
return;
return PTR_ERR(clk);
}
xtal_freq = clk_get_rate(clk);
......@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("%s: registration failed\n", np->full_name);
return;
return ret;
}
sched_clock_register(read_sched_clock, 32, xtal_freq);
register_current_timer_delay(&delay_timer);
return 0;
}
CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
......@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
.dev_id = &tegra_clockevent,
};
static void __init tegra20_init_timer(struct device_node *np)
static int __init tegra20_init_timer(struct device_node *np)
{
struct clk *clk;
unsigned long rate;
......@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
timer_reg_base = of_iomap(np, 0);
if (!timer_reg_base) {
pr_err("Can't map timer registers\n");
BUG();
return -ENXIO;
}
tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
if (tegra_timer_irq.irq <= 0) {
pr_err("Failed to map timer IRQ\n");
BUG();
return -EINVAL;
}
clk = of_clk_get(np, 0);
......@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
sched_clock_register(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32,
clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to register clocksource\n");
BUG();
return ret;
}
tegra_delay_timer.read_current_timer =
......@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
pr_err("Failed to register timer IRQ: %d\n", ret);
BUG();
return ret;
}
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
return 0;
}
CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
static void __init tegra20_init_rtc(struct device_node *np)
static int __init tegra20_init_rtc(struct device_node *np)
{
struct clk *clk;
rtc_base = of_iomap(np, 0);
if (!rtc_base) {
pr_err("Can't map RTC registers");
BUG();
return -ENXIO;
}
/*
......@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
register_persistent_clock(NULL, tegra_read_persistent_clock64);
return register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
......@@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
struct syscore_ops armada_370_xp_timer_syscore_ops = {
static struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
......@@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = {
.read_current_timer = armada_370_delay_timer_read,
};
static void __init armada_370_xp_timer_common_init(struct device_node *np)
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
u32 clr = 0, set = 0;
int res;
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
if (!timer_base) {
pr_err("Failed to iomap");
return -ENXIO;
}
local_base = of_iomap(np, 1);
if (!local_base) {
pr_err("Failed to iomap");
return -ENXIO;
}
if (timer25Mhz) {
set = TIMER0_25MHZ;
......@@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
if (res) {
pr_err("Failed to initialize clocksource mmio");
return res;
}
register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
if (!armada_370_xp_evt)
return -ENOMEM;
/*
* Setup clockevent timer (interrupt-driven).
......@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
"armada_370_xp_per_cpu_tick",
armada_370_xp_evt);
/* Immediately configure the timer on the boot CPU */
if (!res)
armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
if (res) {
pr_err("Failed to request percpu irq");
return res;
}
res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
if (res) {
pr_err("Failed to setup timer");
return res;
}
register_syscore_ops(&armada_370_xp_timer_syscore_ops);
return 0;
}
static void __init armada_xp_timer_init(struct device_node *np)
static int __init armada_xp_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get_by_name(np, "fixed");
int ret;
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
/* The 25Mhz fixed clock is mandatory, and must always be available */
BUG_ON(IS_ERR(clk));
clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk);
armada_370_xp_timer_common_init(np);
return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
armada_xp_timer_init);
static void __init armada_375_timer_init(struct device_node *np)
static int __init armada_375_timer_init(struct device_node *np)
{
struct clk *clk;
int ret;
clk = of_clk_get_by_name(np, "fixed");
if (!IS_ERR(clk)) {
clk_prepare_enable(clk);
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk);
} else {
......@@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np)
clk = of_clk_get(np, 0);
/* Must have at least a clock */
BUG_ON(IS_ERR(clk));
clk_prepare_enable(clk);
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
}
armada_370_xp_timer_common_init(np);
return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
armada_375_timer_init);
static void __init armada_370_timer_init(struct device_node *np)
static int __init armada_370_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get(np, 0);
struct clk *clk;
int ret;
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret)
return ret;
BUG_ON(IS_ERR(clk));
clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
armada_370_xp_timer_common_init(np);
return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
armada_370_timer_init);
......@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
DIV_ROUND_CLOSEST(rate, 1024),
0xf, 0xffff);
setup_irq(irq, &efm32_clock_event_irq);
ret = setup_irq(irq, &efm32_clock_event_irq);
if (ret) {
pr_err("Failed setup irq");
goto err_setup_irq;
}
return 0;
err_setup_irq:
err_get_irq:
iounmap(base);
......@@ -255,16 +260,16 @@ static int __init efm32_clockevent_init(struct device_node *np)
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
static void __init efm32_timer_init(struct device_node *np)
static int __init efm32_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret;
int ret = 0;
if (!has_clocksource) {
ret = efm32_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
return;
return 0;
}
}
......@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
ret = efm32_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
return;
return 0;
}
}
return ret;
}
CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
......@@ -288,16 +288,16 @@ static int __init lpc32xx_clockevent_init(struct device_node *np)
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
static void __init lpc32xx_timer_init(struct device_node *np)
static int __init lpc32xx_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret;
int ret = 0;
if (!has_clocksource) {
ret = lpc32xx_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
return;
return 0;
}
}
......@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
ret = lpc32xx_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
return;
return 0;
}
}
return ret;
}
CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
......@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
.handler = orion_clkevt_irq_handler,
};
static void __init orion_timer_init(struct device_node *np)
static int __init orion_timer_init(struct device_node *np)
{
struct clk *clk;
int irq;
int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base)
panic("%s: unable to map resource\n", np->name);
if (!timer_base) {
pr_err("%s: unable to map resource\n", np->name);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk))
panic("%s: unable to get clk\n", np->name);
clk_prepare_enable(clk);
if (IS_ERR(clk)) {
pr_err("%s: unable to get clk\n", np->name);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Failed to prepare clock");
return ret;
}
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0)
panic("%s: unable to parse timer1 irq\n", np->name);
if (irq <= 0) {
pr_err("%s: unable to parse timer1 irq\n", np->name);
return -EINVAL;
}
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
......@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER0_RELOAD_EN | TIMER0_EN,
TIMER0_RELOAD_EN | TIMER0_EN);
clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
clk_get_rate(clk), 300, 32,
clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to initialize mmio timer");
return ret;
}
sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
if (setup_irq(irq, &orion_clkevt_irq))
panic("%s: unable to setup irq\n", np->name);
ret = setup_irq(irq, &orion_clkevt_irq);
if (ret) {
pr_err("%s: unable to setup irq\n", np->name);
return ret;
}
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
orion_clkevt.cpumask = cpumask_of(0);
orion_clkevt.irq = irq;
clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
return 0;
}
CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
......@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
},
};
static void __init pistachio_clksrc_of_init(struct device_node *node)
static int __init pistachio_clksrc_of_init(struct device_node *node)
{
struct clk *sys_clk, *fast_clk;
struct regmap *periph_regs;
......@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
pcs_gpt.base = of_iomap(node, 0);
if (!pcs_gpt.base) {
pr_err("cannot iomap\n");
return;
return -ENXIO;
}
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
return;
return PTR_ERR(periph_regs);
}
/* Switch to using the fast counter clock */
ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
0xf, 0x0);
if (ret)
return;
return ret;
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
return;
return PTR_ERR(sys_clk);
}
fast_clk = of_clk_get_by_name(node, "fast");
if (IS_ERR(fast_clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
return;
return PTR_ERR(fast_clk);
}
ret = clk_prepare_enable(sys_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
return;
return ret;
}
ret = clk_prepare_enable(fast_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
clk_disable_unprepare(sys_clk);
return;
return ret;
}
rate = clk_get_rate(fast_clk);
......@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
raw_spin_lock_init(&pcs_gpt.lock);
sched_clock_register(pistachio_read_sched_clock, 32, rate);
clocksource_register_hz(&pcs_gpt.cs, rate);
return clocksource_register_hz(&pcs_gpt.cs, rate);
}
CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
pistachio_clksrc_of_init);
......@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
.notifier_call = sirfsoc_cpu_notify,
};
static void __init sirfsoc_clockevent_init(void)
static int __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
BUG_ON(!sirfsoc_clockevent);
......@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
/* Immediately configure the timer on the boot CPU */
sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}
/* initialize the kernel jiffy timer source */
static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
struct clk *clk;
......@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
sirfsoc_clockevent_init();
return sirfsoc_clockevent_init();
}
static void __init sirfsoc_of_timer_init(struct device_node *np)
static int __init sirfsoc_of_timer_init(struct device_node *np)
{
sirfsoc_timer_base = of_iomap(np, 0);
if (!sirfsoc_timer_base)
panic("unable to map timer cpu registers\n");
if (!sirfsoc_timer_base) {
pr_err("unable to map timer cpu registers\n");
return -ENXIO;
}
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
if (!sirfsoc_timer_irq.irq)
panic("No irq passed for timer0 via DT\n");
if (!sirfsoc_timer_irq.irq) {
pr_err("No irq passed for timer0 via DT\n");
return -EINVAL;
}
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
if (!sirfsoc_timer1_irq.irq)
panic("No irq passed for timer1 via DT\n");
if (!sirfsoc_timer1_irq.irq) {
pr_err("No irq passed for timer1 via DT\n");
return -EINVAL;
}
sirfsoc_atlas7_timer_init(np);
return sirfsoc_atlas7_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
......@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/*
* Set up both clocksource and clockevent support.
*/
static void __init at91sam926x_pit_common_init(struct pit_data *data)
static int __init at91sam926x_pit_common_init(struct pit_data *data)
{
unsigned long pit_rate;
unsigned bits;
......@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clksrc.rating = 175;
data->clksrc.read = read_pit_clk;
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
clocksource_register_hz(&data->clksrc, pit_rate);
ret = clocksource_register_hz(&data->clksrc, pit_rate);
if (ret) {
pr_err("Failed to register clocksource");
return ret;
}
/* Set up irq handler */
ret = request_irq(data->irq, at91sam926x_pit_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", data);
if (ret)
panic(pr_fmt("Unable to setup IRQ\n"));
if (ret) {
pr_err("Unable to setup IRQ\n");
return ret;
}
/* Set up and register clockevents */
data->clkevt.name = "pit";
......@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
return 0;
}
static void __init at91sam926x_pit_dt_init(struct device_node *node)
static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
panic(pr_fmt("Unable to allocate memory\n"));
return -ENOMEM;
data->base = of_iomap(node, 0);
if (!data->base)
panic(pr_fmt("Could not map PIT address\n"));
if (!data->base) {
pr_err("Could not map PIT address\n");
return -ENXIO;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck))
/* Fallback on clkdev for !CCF-based boards */
data->mck = clk_get(NULL, "mck");
if (IS_ERR(data->mck))
panic(pr_fmt("Unable to get mck clk\n"));
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
return PTR_ERR(data->mck);
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq)
panic(pr_fmt("Unable to get IRQ from DT\n"));
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
return -EINVAL;
}
at91sam926x_pit_common_init(data);
return at91sam926x_pit_common_init(data);
}
CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
......@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
static void __init atmel_st_timer_init(struct device_node *node)
static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
int irq, ret;
regmap_st = syscon_node_to_regmap(node);
if (IS_ERR(regmap_st))
panic(pr_fmt("Unable to get regmap\n"));
if (IS_ERR(regmap_st)) {
pr_err("Unable to get regmap\n");
return PTR_ERR(regmap_st);
}
/* Disable all timer interrupts, and clear any pending ones */
regmap_write(regmap_st, AT91_ST_IDR,
......@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
/* Get the interrupts property */
irq = irq_of_parse_and_map(node, 0);
if (!irq)
panic(pr_fmt("Unable to get IRQ from DT\n"));
if (!irq) {
pr_err("Unable to get IRQ from DT\n");
return -EINVAL;
}
/* Make IRQs happen for the system timer */
ret = request_irq(irq, at91rm9200_timer_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", regmap_st);
if (ret)
panic(pr_fmt("Unable to setup IRQ\n"));
if (ret) {
pr_err("Unable to setup IRQ\n");
return ret;
}
sclk = of_clk_get(node, 0);
if (IS_ERR(sclk))
panic(pr_fmt("Unable to get slow clock\n"));
if (IS_ERR(sclk)) {
pr_err("Unable to get slow clock\n");
return PTR_ERR(sclk);
}
clk_prepare_enable(sclk);
if (ret)
panic(pr_fmt("Could not enable slow clock\n"));
ret = clk_prepare_enable(sclk);
if (ret) {
pr_err("Could not enable slow clock\n");
return ret;
}
sclk_rate = clk_get_rate(sclk);
if (!sclk_rate)
panic(pr_fmt("Invalid slow clock rate\n"));
if (!sclk_rate) {
pr_err("Invalid slow clock rate\n");
return -EINVAL;
}
timer_latch = (sclk_rate + HZ / 2) / HZ;
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
......@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
2, AT91_ST_ALMV);
/* register clocksource */
clocksource_register_hz(&clk32k, sclk_rate);
return clocksource_register_hz(&clk32k, sclk_rate);
}
CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
atmel_st_timer_init);
......@@ -63,7 +63,7 @@ struct digicolor_timer {
int timer_id; /* one of TIMER_* */
};
struct digicolor_timer *dc_timer(struct clock_event_device *ce)
static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
{
return container_of(ce, struct digicolor_timer, ce);
}
......@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
static void __init digicolor_timer_init(struct device_node *node)
static int __init digicolor_timer_init(struct device_node *node)
{
unsigned long rate;
struct clk *clk;
......@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
dc_timer_dev.base = of_iomap(node, 0);
if (!dc_timer_dev.base) {
pr_err("Can't map registers");
return;
return -ENXIO;
}
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
if (irq <= 0) {
pr_err("Can't parse IRQ");
return;
return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock");
return;
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
......@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
ret = request_irq(irq, digicolor_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
&dc_timer_dev.ce);
if (ret)
if (ret) {
pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
return ret;
}
dc_timer_dev.ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.irq = irq;
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
return 0;
}
CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
digicolor_timer_init);
......@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
.set_next_event = v2_set_next_event,
};
static void __init _mxc_timer_init(struct imx_timer *imxtm)
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
int ret;
switch (imxtm->type) {
case GPT_TYPE_IMX1:
imxtm->gpt = &imx1_gpt_data;
......@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt = &imx6dl_gpt_data;
break;
default:
BUG();
return -EINVAL;
}
if (IS_ERR(imxtm->clk_per)) {
pr_err("i.MX timer: unable to get clk\n");
return;
return PTR_ERR(imxtm->clk_per);
}
if (!IS_ERR(imxtm->clk_ipg))
......@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt->gpt_setup_tctl(imxtm);
/* init and register the timer to the framework */
mxc_clocksource_init(imxtm);
mxc_clockevent_init(imxtm);
ret = mxc_clocksource_init(imxtm);
if (ret)
return ret;
return mxc_clockevent_init(imxtm);
}
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
......@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
_mxc_timer_init(imxtm);
}
static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
static int initialized;
int ret;
/* Support one instance only */
if (initialized)
return;
return 0;
imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
BUG_ON(!imxtm);
if (!imxtm)
return -ENOMEM;
imxtm->base = of_iomap(np, 0);
WARN_ON(!imxtm->base);
if (!imxtm->base)
return -ENXIO;
imxtm->irq = irq_of_parse_and_map(np, 0);
if (imxtm->irq <= 0)
return -EINVAL;
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
......@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type
imxtm->type = type;
_mxc_timer_init(imxtm);
ret = _mxc_timer_init(imxtm);
if (ret)
return ret;
initialized = 1;
return 0;
}
static void __init imx1_timer_init_dt(struct device_node *np)
static int __init imx1_timer_init_dt(struct device_node *np)
{
mxc_timer_init_dt(np, GPT_TYPE_IMX1);
return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}
static void __init imx21_timer_init_dt(struct device_node *np)
static int __init imx21_timer_init_dt(struct device_node *np)
{
mxc_timer_init_dt(np, GPT_TYPE_IMX21);
return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}
static void __init imx31_timer_init_dt(struct device_node *np)
static int __init imx31_timer_init_dt(struct device_node *np)
{
enum imx_gpt_type type = GPT_TYPE_IMX31;
......@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
if (of_machine_is_compatible("fsl,imx6dl"))
type = GPT_TYPE_IMX6DL;
mxc_timer_init_dt(np, type);
return mxc_timer_init_dt(np, type);
}
static void __init imx6dl_timer_init_dt(struct device_node *np)
static int __init imx6dl_timer_init_dt(struct device_node *np)
{
mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
......
......@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
static void integrator_clocksource_init(unsigned long inrate,
void __iomem *base)
static int integrator_clocksource_init(unsigned long inrate,
void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
int ret;
if (rate >= 1500000) {
rate /= 16;
......@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
rate, 200, 16, clocksource_mmio_readl_down);
ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
rate, 200, 16, clocksource_mmio_readl_down);
if (ret)
return ret;
sched_clk_base = base;
sched_clock_register(integrator_read_sched_clock, 16, rate);
return 0;
}
static unsigned long timer_reload;
......@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
static void integrator_clockevent_init(unsigned long inrate,
void __iomem *base, int irq)
static int integrator_clockevent_init(unsigned long inrate,
void __iomem *base, int irq)
{
unsigned long rate = inrate;
unsigned int ctrl = 0;
int ret;
clkevt_base = base;
/* Calculate and program a divisor */
......@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
setup_irq(irq, &integrator_timer_irq);
ret = setup_irq(irq, &integrator_timer_irq);
if (ret)
return ret;
clockevents_config_and_register(&integrator_clockevent,
rate,
1,
0xffffU);
return 0;
}
static void __init integrator_ap_timer_init_of(struct device_node *node)
static int __init integrator_ap_timer_init_of(struct device_node *node)
{
const char *path;
void __iomem *base;
......@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
return;
return PTR_ERR(base);
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("No clock for %s\n", node->name);
return;
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
......@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
err = of_property_read_string(of_aliases,
"arm,timer-primary", &path);
if (WARN_ON(err))
return;
if (err) {
pr_warn("Failed to read property");
return err;
}
pri_node = of_find_node_by_path(path);
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
if (WARN_ON(err))
return;
if (err) {
pr_warn("Failed to read property");
return err;
}
sec_node = of_find_node_by_path(path);
if (node == pri_node) {
if (node == pri_node)
/* The primary timer lacks IRQ, use as clocksource */
integrator_clocksource_init(rate, base);
return;
}
return integrator_clocksource_init(rate, base);
if (node == sec_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
integrator_clockevent_init(rate, base, irq);
return;
return integrator_clockevent_init(rate, base, irq);
}
pr_info("Timer @%p unused\n", base);
clk_disable_unprepare(clk);
return 0;
}
CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
......
......@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
return 0;
}
static void __init keystone_timer_init(struct device_node *np)
static int __init keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
......@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
return;
return -EINVAL;
}
timer.base = of_iomap(np, 0);
if (!timer.base) {
pr_err("%s: failed to map registers\n", __func__);
return;
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: failed to get clock\n", __func__);
iounmap(timer.base);
return;
return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
......@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
return;
return 0;
err:
clk_put(clk);
iounmap(timer.base);
return error;
}
CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
keystone_timer_init);
keystone_timer_init);
......@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
}
static void __init nps_setup_clocksource(struct device_node *node,
struct clk *clk)
static int __init nps_setup_clocksource(struct device_node *node,
struct clk *clk)
{
int ret, cluster;
......@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Couldn't enable parent clock\n");
return;
return ret;
}
nps_timer_rate = clk_get_rate(clk);
......@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
}
return ret;
}
static void __init nps_timer_init(struct device_node *node)
static int __init nps_timer_init(struct device_node *node)
{
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock.\n");
return;
return PTR_ERR(clk);
}
nps_setup_clocksource(node, clk);
return nps_setup_clocksource(node, clk);
}
CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
......
/*
* drivers/clocksource/timer-oxnas-rps.c
*
* Copyright (C) 2009 Oxford Semiconductor Ltd
* Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
* Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/sched_clock.h>
/* TIMER1 used as tick
* TIMER2 used as clocksource
*/
/* Registers definitions */
#define TIMER_LOAD_REG 0x0
#define TIMER_CURR_REG 0x4
#define TIMER_CTRL_REG 0x8
#define TIMER_CLRINT_REG 0xC
#define TIMER_BITS 24
#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
#define TIMER_PERIODIC BIT(6)
#define TIMER_ENABLE BIT(7)
#define TIMER_DIV1 (0)
#define TIMER_DIV16 (1 << 2)
#define TIMER_DIV256 (2 << 2)
#define TIMER1_REG_OFFSET 0
#define TIMER2_REG_OFFSET 0x20
/* Clockevent & Clocksource data */
struct oxnas_rps_timer {
struct clock_event_device clkevent;
void __iomem *clksrc_base;
void __iomem *clkevt_base;
unsigned long timer_period;
unsigned int timer_prescaler;
struct clk *clk;
int irq;
};
static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
{
struct oxnas_rps_timer *rps = dev_id;
writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
rps->clkevent.event_handler(&rps->clkevent);
return IRQ_HANDLED;
}
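/*
 * Program the clockevent timer: load the requested period and apply the
 * prescaler, enable and periodic flags. A zero period leaves the enable
 * bit clear, which is how the shutdown handler stops the timer.
 */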
static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
unsigned long period,
unsigned int periodic)
{
uint32_t cfg = rps->timer_prescaler;
if (period)
cfg |= TIMER_ENABLE;
if (periodic)
cfg |= TIMER_PERIODIC;
writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
}
static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
{
struct oxnas_rps_timer *rps =
container_of(evt, struct oxnas_rps_timer, clkevent);
oxnas_rps_timer_config(rps, 0, 0);
return 0;
}
static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
{
struct oxnas_rps_timer *rps =
container_of(evt, struct oxnas_rps_timer, clkevent);
oxnas_rps_timer_config(rps, rps->timer_period, 1);
return 0;
}
static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
{
struct oxnas_rps_timer *rps =
container_of(evt, struct oxnas_rps_timer, clkevent);
oxnas_rps_timer_config(rps, rps->timer_period, 0);
return 0;
}
static int oxnas_rps_timer_next_event(unsigned long delta,
struct clock_event_device *evt)
{
struct oxnas_rps_timer *rps =
container_of(evt, struct oxnas_rps_timer, clkevent);
oxnas_rps_timer_config(rps, delta, 0);
return 0;
}
static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
{
ulong clk_rate = clk_get_rate(rps->clk);
ulong timer_rate;
/* Start with prescaler 1 */
rps->timer_prescaler = TIMER_DIV1;
rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
timer_rate = clk_rate;
if (rps->timer_period > TIMER_MAX_VAL) {
rps->timer_prescaler = TIMER_DIV16;
timer_rate = clk_rate / 16;
rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
}
if (rps->timer_period > TIMER_MAX_VAL) {
rps->timer_prescaler = TIMER_DIV256;
timer_rate = clk_rate / 256;
rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
}
rps->clkevent.name = "oxnas-rps";
rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ;
rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
rps->clkevent.rating = 200;
rps->clkevent.cpumask = cpu_possible_mask;
rps->clkevent.irq = rps->irq;
clockevents_config_and_register(&rps->clkevent,
timer_rate,
1,
TIMER_MAX_VAL);
pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
clk_rate,
rps->timer_prescaler,
rps->timer_period);
return 0;
}
/* Clocksource */
static void __iomem *timer_sched_base;
static u64 notrace oxnas_rps_read_sched_clock(void)
{
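/* The RPS timer counts down, so invert the value for an increasing sched_clock */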
return ~readl_relaxed(timer_sched_base);
}
static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
{
ulong clk_rate = clk_get_rate(rps->clk);
int ret;
/* use prescale 16 */
clk_rate = clk_rate / 16;
writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
rps->clksrc_base + TIMER_CTRL_REG);
timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
sched_clock_register(oxnas_rps_read_sched_clock,
TIMER_BITS, clk_rate);
ret = clocksource_mmio_init(timer_sched_base,
"oxnas_rps_clocksource_timer",
clk_rate, 250, TIMER_BITS,
clocksource_mmio_readl_down);
if (WARN_ON(ret)) {
pr_err("can't register clocksource\n");
return ret;
}
pr_info("Registered clocksource rate %luHz\n", clk_rate);
return 0;
}
static int __init oxnas_rps_timer_init(struct device_node *np)
{
struct oxnas_rps_timer *rps;
void __iomem *base;
int ret;
rps = kzalloc(sizeof(*rps), GFP_KERNEL);
if (!rps)
return -ENOMEM;
rps->clk = of_clk_get(np, 0);
if (IS_ERR(rps->clk)) {
ret = PTR_ERR(rps->clk);
goto err_alloc;
}
ret = clk_prepare_enable(rps->clk);
if (ret)
goto err_clk;
base = of_iomap(np, 0);
if (!base) {
ret = -ENXIO;
goto err_clk_prepare;
}
rps->irq = irq_of_parse_and_map(np, 0);
if (rps->irq < 0) {
ret = -EINVAL;
goto err_iomap;
}
rps->clkevt_base = base + TIMER1_REG_OFFSET;
rps->clksrc_base = base + TIMER2_REG_OFFSET;
/* Disable timers */
writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
ret = request_irq(rps->irq, oxnas_rps_timer_irq,
IRQF_TIMER | IRQF_IRQPOLL,
"rps-timer", rps);
if (ret)
goto err_iomap;
ret = oxnas_rps_clocksource_init(rps);
if (ret)
goto err_irqreq;
ret = oxnas_rps_clockevent_init(rps);
if (ret)
goto err_irqreq;
return 0;
err_irqreq:
free_irq(rps->irq, rps);
err_iomap:
iounmap(base);
err_clk_prepare:
clk_disable_unprepare(rps->clk);
err_clk:
clk_put(rps->clk);
err_alloc:
kfree(rps);
return ret;
}
CLOCKSOURCE_OF_DECLARE(ox810se_rps,
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
......@@ -19,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
#define PRIMA2_CLOCK_FREQ 1000000
......@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
static void __init sirfsoc_prima2_timer_init(struct device_node *np)
static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
unsigned long rate;
struct clk *clk;
int ret;
clk = of_clk_get(np, 0);
BUG_ON(IS_ERR(clk));
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
}
BUG_ON(clk_prepare_enable(clk));
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Failed to enable clock");
return ret;
}
rate = clk_get_rate(clk);
BUG_ON(rate < PRIMA2_CLOCK_FREQ);
BUG_ON(rate % PRIMA2_CLOCK_FREQ);
if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
pr_err("Invalid clock rate");
return -EINVAL;
}
sirfsoc_timer_base = of_iomap(np, 0);
if (!sirfsoc_timer_base)
panic("unable to map timer cpu registers\n");
if (!sirfsoc_timer_base) {
pr_err("unable to map timer cpu registers\n");
return -ENXIO;
}
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
......@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
PRIMA2_CLOCK_FREQ));
ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
if (ret) {
pr_err("Failed to register clocksource");
return ret;
}
sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
if (ret) {
pr_err("Failed to setup irq");
return ret;
}
sirfsoc_clockevent_init();
return 0;
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
"sirf,prima2-tick", sirfsoc_prima2_timer_init);
......@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
writel(0, base + TIMER_CTRL);
}
void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
......@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (IS_ERR(clk)) {
pr_err("sp804: clock not found: %d\n",
(int)PTR_ERR(clk));
return;
return PTR_ERR(clk);
}
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
return;
return -EINVAL;
/* setup timer 0 as free-running clocksource */
writel(0, base + TIMER_CTRL);
......@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
sched_clock_base = base;
sched_clock_register(sp804_read, 32, rate);
}
return 0;
}
......@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
.dev_id = &sp804_clockevent,
};
void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
......@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
if (IS_ERR(clk)) {
pr_err("sp804: %s clock not found: %d\n", name,
(int)PTR_ERR(clk));
return;
return PTR_ERR(clk);
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
return;
return -EINVAL;
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
......@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
return 0;
}
static void __init sp804_of_init(struct device_node *np)
static int __init sp804_of_init(struct device_node *np)
{
static bool initialized = false;
void __iomem *base;
int irq;
int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
base = of_iomap(np, 0);
if (WARN_ON(!base))
return;
if (!base)
return -ENXIO;
/* Ensure timers are disabled */
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);
if (initialized || !of_device_is_available(np))
if (initialized || !of_device_is_available(np)) {
ret = -EINVAL;
goto err;
}
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
......@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
if (ret)
goto err;
ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
if (ret)
goto err;
} else {
__sp804_clockevents_init(base, irq, clk1 , name);
__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
name, clk2, 1);
ret = __sp804_clockevents_init(base, irq, clk1, name);
if (ret)
goto err;
ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
name, clk2, 1);
if (ret)
goto err;
}
initialized = true;
return;
return 0;
err:
iounmap(base);
return ret;
}
CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
static void __init integrator_cp_of_init(struct device_node *np)
static int __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
int irq;
int irq, ret = -EINVAL;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
if (WARN_ON(!base))
return;
if (!base) {
pr_err("Failed to iomap");
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (WARN_ON(IS_ERR(clk)))
return;
if (IS_ERR(clk)) {
pr_err("Failed to get clock");
return PTR_ERR(clk);
}
/* Ensure timer is disabled */
writel(0, base + TIMER_CTRL);
......@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
if (init_count == 2 || !of_device_is_available(np))
goto err;
if (!init_count)
__sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
else {
if (!init_count) {
ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
if (ret)
goto err;
} else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
__sp804_clockevents_init(base, irq, clk, name);
ret = __sp804_clockevents_init(base, irq, clk, name);
if (ret)
goto err;
}
init_count++;
return;
return 0;
err:
iounmap(base);
return ret;
}
CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
......@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
},
};
static void __init stm32_clockevent_init(struct device_node *np)
static int __init stm32_clockevent_init(struct device_node *np)
{
struct stm32_clock_event_ddata *data = &clock_event_ddata;
struct clk *clk;
......@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
data->base = of_iomap(np, 0);
if (!data->base) {
ret = -ENXIO;
pr_err("failed to map registers for clockevent\n");
goto err_iomap;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
pr_err("%s: failed to get irq.\n", np->full_name);
goto err_get_irq;
}
......@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
np->full_name, bits);
return;
return ret;
err_get_irq:
iounmap(data->base);
......@@ -182,7 +184,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
err_clk_enable:
clk_put(clk);
err_clk_get:
return;
return ret;
}
CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
......@@ -311,33 +311,42 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
return ret;
}
static void __init sun5i_timer_init(struct device_node *node)
static int __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
int irq;
int irq, ret;
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(timer_base))
panic("Can't map registers");
if (IS_ERR(timer_base)) {
pr_err("Can't map registers");
return PTR_ERR(timer_base);
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0)
panic("Can't parse IRQ");
if (irq <= 0) {
pr_err("Can't parse IRQ");
return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
panic("Can't get timer clock");
if (IS_ERR(clk)) {
pr_err("Can't get timer clock");
return PTR_ERR(clk);
}
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc))
reset_control_deassert(rstc);
sun5i_setup_clocksource(node, timer_base, clk, irq);
sun5i_setup_clockevent(node, timer_base, clk, irq);
ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
if (ret)
return ret;
return sun5i_setup_clockevent(node, timer_base, clk, irq);
}
CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
sun5i_timer_init);
sun5i_timer_init);
CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
sun5i_timer_init);
sun5i_timer_init);
......@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
static void __init ti_32k_timer_init(struct device_node *np)
static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
ti_32k_timer.base = of_iomap(np, 0);
if (!ti_32k_timer.base) {
pr_err("Can't ioremap 32k timer base\n");
return;
return -ENXIO;
}
ti_32k_timer.counter = ti_32k_timer.base;
......@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
return;
return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
return 0;
}
CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
ti_32k_timer_init);
......@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
/*
* This sets up the system timers, clock source and clock event.
*/
static void __init u300_timer_init_of(struct device_node *np)
static int __init u300_timer_init_of(struct device_node *np)
{
unsigned int irq;
struct clk *clk;
unsigned long rate;
int ret;
u300_timer_base = of_iomap(np, 0);
if (!u300_timer_base)
panic("could not ioremap system timer\n");
if (!u300_timer_base) {
pr_err("could not ioremap system timer\n");
return -ENXIO;
}
/* Get the IRQ for the GP1 timer */
irq = irq_of_parse_and_map(np, 2);
if (!irq)
panic("no IRQ for system timer\n");
if (!irq) {
pr_err("no IRQ for system timer\n");
return -EINVAL;
}
pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
/* Clock the interrupt controller */
clk = of_clk_get(np, 0);
BUG_ON(IS_ERR(clk));
clk_prepare_enable(clk);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_prepare_enable(clk);
if (ret)
return ret;
rate = clk_get_rate(clk);
u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
......@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_RGPT1);
/* Set up the IRQ handler */
setup_irq(irq, &u300_timer_irq);
ret = setup_irq(irq, &u300_timer_irq);
if (ret)
return ret;
/* Reset the General Purpose timer 2 */
writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
......@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_EGPT2);
/* Use general purpose timer 2 as clock source */
if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
"GPT2", rate, 300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("timer: failed to initialize U300 clock source\n");
return ret;
}
/* Configure and register the clockevent */
clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
......@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
* TODO: init and register the rest of the timers too, they can be
* used by hrtimers!
*/
return 0;
}
CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
......
......@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
return readl(versatile_sys_24mhz);
}
static void __init versatile_sched_clock_init(struct device_node *node)
static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
if (!base)
return;
return -ENXIO;
versatile_sys_24mhz = base + SYS_24MHZ;
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
return 0;
}
CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
versatile_sched_clock_init);
......
......@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
return 0;
}
static void __init pit_timer_init(struct device_node *np)
static int __init pit_timer_init(struct device_node *np)
{
struct clk *pit_clk;
void __iomem *timer_base;
unsigned long clk_rate;
int irq;
int irq, ret;
timer_base = of_iomap(np, 0);
BUG_ON(!timer_base);
if (!timer_base) {
pr_err("Failed to iomap");
return -ENXIO;
}
/*
* PIT0 and PIT1 can be chained to build a 64-bit timer,
......@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
clkevt_base = timer_base + PITn_OFFSET(3);
irq = irq_of_parse_and_map(np, 0);
BUG_ON(irq <= 0);
if (irq <= 0)
return -EINVAL;
pit_clk = of_clk_get(np, 0);
BUG_ON(IS_ERR(pit_clk));
if (IS_ERR(pit_clk))
return PTR_ERR(pit_clk);
BUG_ON(clk_prepare_enable(pit_clk));
ret = clk_prepare_enable(pit_clk);
if (ret)
return ret;
clk_rate = clk_get_rate(pit_clk);
cycle_per_jiffy = clk_rate / (HZ);
......@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
/* enable the pit module */
__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
BUG_ON(pit_clocksource_init(clk_rate));
ret = pit_clocksource_init(clk_rate);
if (ret)
return ret;
pit_clockevent_init(clk_rate, irq);
return pit_clockevent_init(clk_rate, irq);
}
CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
......@@ -121,38 +121,48 @@ static struct irqaction irq = {
.dev_id = &clockevent,
};
static void __init vt8500_timer_init(struct device_node *np)
static int __init vt8500_timer_init(struct device_node *np)
{
int timer_irq;
int timer_irq, ret;
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
return;
return -ENXIO;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
return;
return -EINVAL;
}
writel(1, regbase + TIMER_CTRL_VAL);
writel(0xf, regbase + TIMER_STATUS_VAL);
writel(~0, regbase + TIMER_MATCH_VAL);
if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
if (ret) {
pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
__func__, clocksource.name);
__func__, clocksource.name);
return ret;
}
clockevent.cpumask = cpumask_of(0);
if (setup_irq(timer_irq, &irq))
ret = setup_irq(timer_irq, &irq);
if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
return ret;
}
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
MIN_OSCR_DELTA * 2, 0xf0000000);
return 0;
}
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
......@@ -210,9 +210,9 @@ static int __init zevio_timer_add(struct device_node *node)
return ret;
}
static void __init zevio_timer_init(struct device_node *node)
static int __init zevio_timer_init(struct device_node *node)
{
BUG_ON(zevio_timer_add(node));
return zevio_timer_add(node);
}
CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
......@@ -530,8 +530,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
else
timer_interval = GPSTATE_TIMER_INTERVAL;
mod_timer_pinned(&gpstates->timer, jiffies +
msecs_to_jiffies(timer_interval));
mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
/**
......@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = gpstates;
/* initialize timer */
init_timer_deferrable(&gpstates->timer);
init_timer_pinned_deferrable(&gpstates->timer);
gpstates->timer.data = (unsigned long)policy;
gpstates->timer.function = gpstate_timer_handler;
gpstates->timer.expires = jiffies +
......
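The mod_timer_pinned() removals in this and the following hunks all follow one recipe: pinning is now declared when the timer is initialized (init_timer_pinned(), init_timer_pinned_deferrable(), setup_pinned_timer()), and re-arming goes through plain mod_timer(), which keeps a pinned timer on the CPU that queued it. A hedged sketch of the new shape, using illustrative names and the data/function timer callback API still in use in this tree:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
	/* ... do the periodic work ... */
	mod_timer(&poll_timer, jiffies + HZ);	/* stays on the CPU it was queued on */
}

static void start_polling(void)
{
	setup_pinned_timer(&poll_timer, poll_fn, 0);	/* pinned at init time */
	poll_timer.expires = jiffies + HZ;
	add_timer(&poll_timer);
}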
......@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_clock_disable(host);
setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
(unsigned long)host);
/* It is not important when it times out, it just needs to timeout. */
set_timer_slack(&host->timeout_timer, HZ);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
......
......@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
if (!info->egress_timer_scheduled) {
mod_timer_pinned(&info->egress_timer, jiffies + 1);
mod_timer(&info->egress_timer, jiffies + 1);
info->egress_timer_scheduled = true;
}
}
......@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
init_timer(&info->egress_timer);
init_timer_pinned(&info->egress_timer);
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
......
......@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
bq27xxx_battery_update(di);
if (poll_interval > 0) {
/* The timer does not have to be accurate. */
set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
if (poll_interval > 0)
schedule_delayed_work(&di->work, poll_interval * HZ);
}
}
/*
......
......@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
setup_timer(poll_timer, dashtty_timer, 0);
setup_pinned_timer(poll_timer, dashtty_timer, 0);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
......
......@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
}
/* TTY Port operations */
......@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get an usable IRQ, poll instead */
setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
(unsigned long)priv);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
......
......@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
setup_timer(&ohci->io_watchdog, io_watchdog_func,
(unsigned long) ohci);
set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
......
......@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
set_timer_slack(&xhci->comp_mode_recovery_timer,
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
......
......@@ -390,6 +390,11 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
if (!capable(CAP_WAKE_ALARM) &&
(clockid == CLOCK_REALTIME_ALARM ||
clockid == CLOCK_BOOTTIME_ALARM))
return -EPERM;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
......@@ -433,6 +438,11 @@ static int do_timerfd_settime(int ufd, int flags,
return ret;
ctx = f.file->private_data;
if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
fdput(f);
return -EPERM;
}
timerfd_setup_cancel(ctx, flags);
/*
......
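Seen from userspace, the two CAP_WAKE_ALARM checks mean an unprivileged process can no longer create or re-arm an alarm timerfd; both paths now fail with EPERM instead of silently handing out wakeup capability. A small illustrative test program, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/timerfd.h>

#ifndef CLOCK_REALTIME_ALARM
#define CLOCK_REALTIME_ALARM 8	/* older libc headers may lack this */
#endif

int main(void)
{
	int fd = timerfd_create(CLOCK_REALTIME_ALARM, 0);

	if (fd < 0)	/* EPERM when CAP_WAKE_ALARM is missing */
		printf("timerfd_create: %s\n", strerror(errno));
	else
		printf("alarm timerfd created, CAP_WAKE_ALARM present\n");
	return 0;
}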
......@@ -3,10 +3,10 @@
struct clk;
void __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
void __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
int __sp804_clocksource_and_sched_clock_init(void __iomem *,
const char *, struct clk *, int);
int __sp804_clockevents_init(void __iomem *, unsigned int,
struct clk *, const char *);
void sp804_timer_disable(void __iomem *);
static inline void sp804_clocksource_init(void __iomem *base, const char *name)
......
......@@ -26,10 +26,10 @@ enum alarmtimer_restart {
* struct alarm - Alarm timer structure
* @node: timerqueue node for adding to the event list this value
* also includes the expiration time.
* @period: Period for recurring alarms
* @timer: hrtimer used to schedule events while running
* @function: Function pointer to be executed when the timer fires.
* @type: Alarm type (BOOTTIME/REALTIME)
* @enabled: Flag that represents if the alarm is set to fire or not
* @type: Alarm type (BOOTTIME/REALTIME).
* @state: Flag that represents if the alarm is set to fire or not.
* @data: Internal data value.
*/
struct alarm {
......
......@@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
return NULL;
}
static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
......
......@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
extern int clocksource_i8253_init(void);
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1(clksrc, name, compat, fn)
OF_DECLARE_1_RET(clksrc, name, compat, fn)
#ifdef CONFIG_CLKSRC_PROBE
extern void clocksource_probe(void);
......
......@@ -678,6 +678,16 @@ static inline bool hlist_fake(struct hlist_node *h)
return h->pprev == &h->next;
}
/*
* Check whether the node is the only node of the head without
* accessing head:
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
return !n->next && n->pprev == &h->first;
}
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
......
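The helper answers "is this node the only entry on its list?" from the node's own links alone; the series adds it for the reworked timer wheel, which needs to know when a wheel bucket is about to go empty. A hypothetical caller, purely for illustration (the function and parameter names below are not from the patch):

#include <linux/bitops.h>
#include <linux/list.h>

/* Illustrative only: tear down per-bucket state when the last entry leaves. */
static void remove_from_bucket(struct hlist_node *node, struct hlist_head *bucket,
			       unsigned long *pending_map, int idx)
{
	if (hlist_is_singular_node(node, bucket))
		__clear_bit(idx, pending_map);	/* bucket is about to go empty */
	hlist_del_init(node);
}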
......@@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
typedef int (*of_init_fn_1_ret)(struct device_node *);
typedef void (*of_init_fn_1)(struct device_node *);
#define OF_DECLARE_1(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_1)
#define OF_DECLARE_1_RET(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
#define OF_DECLARE_2(table, name, compat, fn) \
_OF_DECLARE(table, name, compat, fn, of_init_fn_2)
......
......@@ -205,7 +205,20 @@ struct tm {
int tm_yday;
};
void time_to_tm(time_t totalsecs, int offset, struct tm *result);
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
/**
* time_to_tm - converts the calendar time to local broken-down time
*
* @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
* Coordinated Universal Time (UTC).
* @offset: offset in seconds added to totalsecs.
* @result: pointer to struct tm variable to receive the broken-down time.
*/
static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
{
time64_to_tm(totalsecs, offset, result);
}
/**
* timespec_to_ns - Convert timespec to nanoseconds
......
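time64_to_tm() becomes the primary implementation and time_to_tm() shrinks to the inline wrapper above, so existing 32-bit time_t callers keep working while new code can pass a full 64-bit seconds value past 2038. A brief illustrative caller (names not from the patch):

#include <linux/kernel.h>
#include <linux/time.h>

static void print_utc_date(time64_t secs)
{
	struct tm tm;

	time64_to_tm(secs, 0, &tm);	/* offset 0: plain UTC */
	pr_info("UTC: %04ld-%02d-%02d %02d:%02d:%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);
}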
......@@ -30,7 +30,6 @@
* struct alarm_base - Alarm timer bases
* @lock: Lock for synchronized access to the base
* @timerqueue: Timerqueue head managing the list of events
* @timer: hrtimer used to schedule events while running
* @gettime: Function to read the time correlating to the base
* @base_clockid: clockid for the base
*/
......
......@@ -645,7 +645,7 @@ void tick_cleanup_dead_cpu(int cpu)
#endif
#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
static struct bus_type clockevents_subsys = {
.name = "clockevents",
.dev_name = "clockevent",
};
......