exynos_mct.c 15.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>

29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

57 58
#define TICK_BASE_CNT	1

59 60 61 62 63
enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

64 65 66 67 68 69 70 71 72
enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
73 74 75 76
	MCT_L4_IRQ,
	MCT_L5_IRQ,
	MCT_L6_IRQ,
	MCT_L7_IRQ,
77 78 79
	MCT_NR_IRQS,
};

80
static void __iomem *reg_base;
81
static unsigned long clk_rate;
82
static unsigned int mct_int_type;
83
static int mct_irqs[MCT_NR_IRQS];
84 85

struct mct_clock_event_device {
86
	struct clock_event_device evt;
87
	unsigned long base;
88
	char name[10];
89 90
};

91
static void exynos4_mct_write(unsigned int value, unsigned long offset)
92
{
93
	unsigned long stat_addr;
94 95 96
	u32 mask;
	u32 i;

97
	writel_relaxed(value, reg_base + offset);
98

99
	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
100 101
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
102
		case MCT_L_TCON_OFFSET:
103 104
			mask = 1 << 3;		/* L_TCON write status */
			break;
105
		case MCT_L_ICNTB_OFFSET:
106 107
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
108
		case MCT_L_TCNTB_OFFSET:
109 110 111 112 113 114
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
115 116
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
117 118 119
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
120
		case EXYNOS4_MCT_G_COMP0_L:
121 122 123
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
124
		case EXYNOS4_MCT_G_COMP0_U:
125 126 127
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
128
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
129 130 131
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
132
		case EXYNOS4_MCT_G_CNT_L:
133 134 135
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
136
		case EXYNOS4_MCT_G_CNT_U:
137 138 139 140 141 142
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
143 144 145 146
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
147 148
		if (readl_relaxed(reg_base + stat_addr) & mask) {
			writel_relaxed(mask, reg_base + stat_addr);
149 150 151
			return;
		}

152
	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
153 154 155
}

/* Clocksource handling */
156
static void exynos4_mct_frc_start(void)
157 158 159
{
	u32 reg;

160
	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
161 162 163 164
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

165 166 167 168 169 170 171 172 173 174 175
/**
 * exynos4_read_count_64 - Read all 64-bits of the global counter
 *
 * This will read all 64-bits of the global counter taking care to make sure
 * that the upper and lower half match.  Note that reading the MCT can be quite
 * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
 * only) version when possible.
 *
 * Returns the number of cycles in the global counter.
 */
static u64 exynos4_read_count_64(void)
176 177
{
	unsigned int lo, hi;
178
	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
179 180 181

	do {
		hi = hi2;
182 183
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
184 185 186 187 188
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

189 190 191 192 193 194 195 196 197 198 199 200 201
/**
 * exynos4_read_count_32 - Read the lower 32-bits of the global counter
 *
 * This will read just the lower 32-bits of the global counter.  This is marked
 * as notrace so it can be used by the scheduler clock.
 *
 * Returns the number of cycles in the global counter (lower 32 bits).
 */
static u32 notrace exynos4_read_count_32(void)
{
	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
}

202 203
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
204
	return exynos4_read_count_32();
205 206
}

207 208
static void exynos4_frc_resume(struct clocksource *cs)
{
209
	exynos4_mct_frc_start();
210 211
}

212 213 214 215
struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
216
	.mask		= CLOCKSOURCE_MASK(32),
217
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
218
	.resume		= exynos4_frc_resume,
219 220
};

221 222
static u64 notrace exynos4_read_sched_clock(void)
{
223
	return exynos4_read_count_32();
224 225
}

226 227 228 229
static struct delay_timer exynos4_delay_timer;

static cycles_t exynos4_read_current_timer(void)
{
230 231 232
	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
			 "cycles_t needs to move to 32-bit for ARM64 usage");
	return exynos4_read_count_32();
233 234
}

235 236
static void __init exynos4_clocksource_init(void)
{
237
	exynos4_mct_frc_start();
238

239 240 241 242
	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
	exynos4_delay_timer.freq = clk_rate;
	register_current_timer_delay(&exynos4_delay_timer);

243 244
	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
245

246
	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
247 248 249 250 251 252
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

253
	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
254 255 256 257 258 259 260 261 262 263 264 265
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

266
	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
267 268 269 270 271 272

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

273
	comp_cycle = exynos4_read_count_64() + cycles;
274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
294
	unsigned long cycles_per_jiffy;
295 296 297 298
	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
299 300 301
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
341 342
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
343
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
344 345
}

346 347
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

348 349 350 351 352
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
353
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
354

355
	tmp = readl_relaxed(reg_base + offset);
356 357
	if (tmp & mask) {
		tmp &= ~mask;
358
		exynos4_mct_write(tmp, offset);
359 360 361 362 363 364 365 366 367 368 369 370 371 372 373
	}
}

static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

L
Lucas De Marchi 已提交
374
	/* enable MCT tick interrupt */
375 376
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

377
	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
378 379 380 381 382 383 384 385
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
386
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
387 388 389 390 391 392 393 394 395

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
396
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
397
	unsigned long cycles_per_jiffy;
398 399 400 401 402

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
403 404 405
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
406 407 408 409 410 411 412 413 414 415
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

416
static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
417
{
418
	struct clock_event_device *evt = &mevt->evt;
419 420 421 422 423 424 425 426 427 428

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
429
	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
430 431 432 433 434 435
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
436
	struct clock_event_device *evt = &mevt->evt;
437 438

	exynos4_mct_tick_clear(mevt);
439 440 441 442 443 444

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

445
static int exynos4_local_timer_setup(struct clock_event_device *evt)
446
{
447
	struct mct_clock_event_device *mevt;
448 449
	unsigned int cpu = smp_processor_id();

450
	mevt = container_of(evt, struct mct_clock_event_device, evt);
451

452
	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
453
	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
454

455
	evt->name = mevt->name;
456 457 458 459 460 461
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

462
	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
463

464
	if (mct_int_type == MCT_INT_SPI) {
465 466 467 468 469 470 471
		evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
		if (request_irq(evt->irq, exynos4_mct_tick_isr,
				IRQF_TIMER | IRQF_NOBALANCING,
				evt->name, mevt)) {
			pr_err("exynos-mct: cannot register IRQ %d\n",
				evt->irq);
			return -EIO;
472
		}
473
		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
474
	} else {
475
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
476
	}
477 478
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);
479 480

	return 0;
481 482
}

483
static void exynos4_local_timer_stop(struct clock_event_device *evt)
484
{
485
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
486
	if (mct_int_type == MCT_INT_SPI)
487
		free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
488
	else
489
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
490
}
491

492
static int exynos4_mct_cpu_notify(struct notifier_block *self,
493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514
					   unsigned long action, void *hcpu)
{
	struct mct_clock_event_device *mevt;

	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		mevt = this_cpu_ptr(&percpu_mct_tick);
		exynos4_local_timer_setup(&mevt->evt);
		break;
	case CPU_DYING:
		mevt = this_cpu_ptr(&percpu_mct_tick);
		exynos4_local_timer_stop(&mevt->evt);
		break;
	}

	return NOTIFY_OK;
}

515
static struct notifier_block exynos4_mct_cpu_nb = {
516
	.notifier_call = exynos4_mct_cpu_notify,
517
};
518

519
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
520
{
521 522
	int err;
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
523
	struct clk *mct_clk, *tick_clk;
524

525 526 527 528 529
	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
				clk_get(NULL, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);
530

531 532 533 534
	mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);
535

536
	reg_base = base;
537 538
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);
539

540 541
	if (mct_int_type == MCT_INT_PPI) {

542
		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
543 544 545
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
546
		     mct_irqs[MCT_L0_IRQ], err);
547 548
	} else {
		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
549
	}
550

551 552 553 554 555 556 557 558 559 560
	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
	if (err)
		goto out_irq;

	/* Immediately configure the timer on the boot CPU */
	exynos4_local_timer_setup(&mevt->evt);
	return;

out_irq:
	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
561 562
}

563
void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
564
{
565 566 567 568
	mct_irqs[MCT_G0_IRQ] = irq_g0;
	mct_irqs[MCT_L0_IRQ] = irq_l0;
	mct_irqs[MCT_L1_IRQ] = irq_l1;
	mct_int_type = MCT_INT_SPI;
569

570
	exynos4_timer_resources(NULL, base);
571 572 573
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
574

575 576 577 578 579 580 581 582 583 584 585 586 587 588
static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
589
#ifdef CONFIG_OF
590
	nr_irqs = of_irq_count(np);
591 592 593
#else
	nr_irqs = 0;
#endif
594 595 596
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

597
	exynos4_timer_resources(np, of_iomap(np, 0));
598 599 600
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
601 602 603 604 605 606 607 608 609 610 611 612 613


static void __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);