/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/localtimer.h>
#include <asm/mach/time.h>

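/*
 * Register map: the global free-running counter (G_CNT), comparator 0
 * (G_COMP0) and their control/status registers sit below 0x300; each
 * CPU then owns a 0x100-byte local timer block starting at 0x300.
 * Most writes are posted and must be confirmed through the matching
 * WSTAT register (see exynos4_mct_write()).
 */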
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device *evt;
	unsigned long base;
	char name[10];
};

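/*
 * Write an MCT register and wait for the write to take effect.  MCT
 * register writes are posted: the hardware sets the corresponding bit
 * in the G_/L_ WSTAT register once the value has been latched, so poll
 * that bit for up to roughly 1 ms, acknowledge it, and panic if the
 * write is never accepted.
 */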
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		/*
		 * Local timer register: the low byte selects the register
		 * inside the per-CPU block, the upper bits select the block
		 * whose L_WSTAT must be polled.
		 */
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(reg_base + stat_addr) & mask) {
			__raw_writel(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

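/*
 * The 64-bit free-running counter is read as two 32-bit halves.  Re-read
 * the upper word until it is stable so that a carry between the two
 * accesses cannot return a torn value.
 */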
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

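/*
 * Program global comparator 0.  In periodic mode the comparator is set
 * to auto-increment by 'cycles' on every match (G_COMP0_ADD_INCR), so
 * only the first expiry needs to be written explicitly.
 */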
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

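/*
 * Register global comparator 0 as a clockevent bound to CPU0.  Its
 * rating (250) is lower than that of the per-CPU local ticks (450), so
 * the local timers are preferred whenever they are available.
 */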
static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}

#ifdef CONFIG_LOCAL_TIMERS

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}

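/*
 * (Re)arm a local timer: load the interrupt count buffer (bit 31
 * requests the ICNTB update), enable the tick interrupt and start the
 * timer in interval mode so the count is reloaded on every expiry.
 */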
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

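/*
 * Per-CPU tick setup, run on each CPU as it comes online: point the
 * clockevent at this CPU's local timer block and hook up its interrupt,
 * either a dedicated SPI (CPU0/CPU1) or the shared PPI requested in
 * exynos4_timer_resources().
 */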
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L0_IRQ];
			setup_irq(evt->irq, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L1_IRQ];
			setup_irq(evt->irq, &mct_tick1_event_irq);
			irq_set_affinity(evt->irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}

	return 0;
}

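/*
 * CPU hotplug teardown: mark the clockevent unused and release the SPI
 * (or disable the PPI) that was set up for this CPU.
 */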
static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

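/*
 * Common resource setup for both the DT and the legacy path: determine
 * the tick rate from the "fin_pll" clock, enable the "mct" gate clock,
 * record the register base and, when local timers are configured,
 * register the per-CPU tick (requesting the shared PPI up front when
 * that interrupt type is used).
 */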
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
	struct clk *mct_clk, *tick_clk;

	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
				clk_get(NULL, "fin_pll");
	if (IS_ERR(tick_clk))
		panic("%s: unable to determine tick clock rate\n", __func__);
	clk_rate = clk_get_rate(tick_clk);

	mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
	if (IS_ERR(mct_clk))
		panic("%s: unable to retrieve mct clock instance\n", __func__);
	clk_prepare_enable(mct_clk);

	reg_base = base;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}

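/*
 * Legacy (non-DT) entry point: board code passes the already-mapped
 * register base together with the global and the two local SPI numbers.
 */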
void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
{
	mct_irqs[MCT_G0_IRQ] = irq_g0;
	mct_irqs[MCT_L0_IRQ] = irq_l0;
	mct_irqs[MCT_L1_IRQ] = irq_l1;
	mct_int_type = MCT_INT_SPI;

	exynos4_timer_resources(NULL, base);
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
#ifdef CONFIG_OF
	nr_irqs = of_irq_count(np);
#else
	nr_irqs = 0;
#endif
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	exynos4_timer_resources(np, of_iomap(np, 0));
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

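/*
 * exynos4210 routes the local timer interrupts as SPIs, while exynos4412
 * provides a single per-CPU PPI; the wrappers below only select the
 * interrupt type before the common DT init runs.
 */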
static void __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);