/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/arch_timer.h>
#include <asm/localtimer.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <asm/mach/time.h>

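/*
 * Register layout, as reflected in the offsets below: one 64-bit global
 * free-running counter (G_CNT_L/G_CNT_U) with comparator 0, followed by
 * per-CPU local timers at 0x300 + 0x100 * cpu.  Each writable register
 * has a corresponding write-status (WSTAT) bit.
 */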
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device *evt;
	unsigned long base;
	char name[10];
};
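/*
 * Writes to MCT registers are confirmed through per-register write-status
 * (WSTAT) bits.  exynos4_mct_write() below therefore polls the matching
 * WSTAT bit for roughly 1 ms (approximated with loops_per_jiffy), clears
 * it once it is observed, and panics if the write never completes.
 */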

static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(reg_base + stat_addr) & mask) {
			__raw_writel(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
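/*
 * The 64-bit free-running counter is read as two 32-bit halves; the upper
 * word is re-read until it is stable so that a carry between the low and
 * high reads cannot produce a torn value.
 */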

static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

static struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}
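/*
 * Global comparator 0 fires when the free-running counter reaches the
 * programmed 64-bit compare value.  In periodic mode the AUTO_INC bit and
 * the COMP0_ADD_INCR register appear to make the hardware re-arm the
 * comparator by that increment after each match (see
 * exynos4_mct_comp0_start() below; the Exynos4 manual is authoritative).
 */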

static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}
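/*
 * Note on the periodic-mode arithmetic used below: the clockevent core
 * sets up mult/shift such that cycles = (ns * mult) >> shift, so
 * ((NSEC_PER_SEC / HZ) * mult) >> shift converts one jiffy worth of
 * nanoseconds into comparator cycles.
 */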

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;
	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}

#ifdef CONFIG_LOCAL_TIMERS

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}
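/*
 * The local timer is used in interval mode: TCNTB holds the tick divider
 * (TICK_BASE_CNT) and ICNTB the number of intervals per interrupt, so the
 * effective event period is ICNTB * (TCNTB + 1) input clocks (the
 * (TCNTB + 1) factor is inferred from the clk_rate / (TICK_BASE_CNT + 1)
 * scaling in exynos4_local_timer_setup()).  Bit 31 of the ICNTB write
 * requests a manual update of the interrupt counter, as done in
 * exynos4_mct_tick_start() below.
 */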

static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is for supporting oneshot mode: the MCT would keep
	 * generating interrupts periodically unless it is explicitly
	 * stopped here.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
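/*
 * In SPI mode each CPU's local timer has its own interrupt line, hence
 * one irqaction per CPU below.  IRQF_NOBALANCING keeps each line pinned
 * to its CPU (CPU1's affinity is also set explicitly in
 * exynos4_local_timer_setup()).
 */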

static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L0_IRQ];
			setup_irq(evt->irq, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L1_IRQ];
			setup_irq(evt->irq, &mct_tick1_event_irq);
			irq_set_affinity(evt->irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}

	return 0;
}

static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI)
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	else
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
	struct clk *mct_clk, *tick_clk;

	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
				clk_get(NULL, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	reg_base = base;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}
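/*
 * Non-DT entry point: the IRQ numbers and the register base come from the
 * static machine definitions (S5P_VA_SYSTIMER), whereas the DT path in
 * mct_init_dt() below obtains them from the device tree.
 */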

void __init mct_init(void)
{
	if (soc_is_exynos4210()) {
		mct_irqs[MCT_G0_IRQ] = EXYNOS4_IRQ_MCT_G0;
		mct_irqs[MCT_L0_IRQ] = EXYNOS4_IRQ_MCT_L0;
		mct_irqs[MCT_L1_IRQ] = EXYNOS4_IRQ_MCT_L1;
		mct_int_type = MCT_INT_SPI;
	} else {
		panic("unable to determine mct controller type\n");
	}

	exynos4_timer_resources(NULL, S5P_VA_SYSTIMER);
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
#ifdef CONFIG_OF
	nr_irqs = of_irq_count(np);
#else
	nr_irqs = 0;
#endif
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	exynos4_timer_resources(np, of_iomap(np, 0));
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

static void __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
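/*
 * exynos4210-mct routes the local-timer interrupts as ordinary SPIs (one
 * per CPU), while exynos4412-mct delivers them as a per-CPU PPI; the two
 * entry points only differ in the interrupt type passed to mct_init_dt().
 */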
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);