/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

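/*
 * CNTTIDR describes each memory-mapped timer frame in a 4-bit field;
 * CNTTIDR_VIRT(n) picks the bit that advertises virtual timer support
 * for frame n.
 */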
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

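/*
 * PPI slots in the order the DT "interrupts" property lists them:
 * secure physical, non-secure physical, virtual, then hypervisor.
 */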
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static bool arch_timer_use_virtual = true;
static bool arch_timer_mem_use_virtual;

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
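		/*
		 * The timer condition stays asserted until the comparator is
		 * reprogrammed, so mask the output before running the event
		 * handler; set_next_event() unmasks it again.
		 */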
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline void timer_set_mode(const int access, int mode,
				  struct clock_event_device *clk)
{
	unsigned long ctrl;
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}

static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
					 struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
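	/* Program the interval first, then enable with the output unmasked. */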
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
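		/* The per-cpu timer can stop while its CPU is in a deep idle state. */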
		clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		if (arch_timer_use_virtual) {
			clk->irq = arch_timer_ppi[VIRT_PPI];
			clk->set_mode = arch_timer_set_mode_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
		} else {
			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
			clk->set_mode = arch_timer_set_mode_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
		}
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_mode = arch_timer_set_mode_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_mode = arch_timer_set_mode_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);

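	/* TVAL is a signed 32-bit down-counter, hence the 0x7fffffff max delta. */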
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream; EVNTI is a 4-bit field, so the trigger bit caps at 15 */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static int arch_timer_setup(struct clock_event_device *clk)
{
	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();
	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		     type & ARCH_CP15_TIMER ? "cp15" : "",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  " and " : "",
		     type & ARCH_MEM_TIMER ? "mmio" : "",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     type & ARCH_CP15_TIMER ?
			arch_timer_use_virtual ? "virt" : "phys" :
			"",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "",
		     type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

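	/*
	 * Sample the high word on both sides of the low word and retry if it
	 * changed, so a carry between the two 32-bit reads cannot produce a
	 * torn 64-bit value.
	 */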
	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER)
		arch_timer_read_counter = arch_counter_get_cntvct;
	else
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static int arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
		break;
	case CPU_DYING:
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb = {
	.notifier_call = arch_timer_cpu_notify,
};

#ifdef CONFIG_CPU_PM
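/* CNTKCTL is lost when a CPU powers down; save/restore it around PM events. */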
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_unreg_notify:
	unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};

static void __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (of_find_matching_node(NULL, arch_timer_mem_of_match) &&
				!(arch_timers_present & ARCH_MEM_TIMER))
			return;
		if (of_find_matching_node(NULL, arch_timer_of_match) &&
				!(arch_timers_present & ARCH_CP15_TIMER))
			return;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	arch_timer_arch_init();
}

static void __init arch_timer_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
	arch_timer_detect_rate(NULL, np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	arch_timer_register();
	arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);

static void __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
	iounmap(cntctlbase);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		int n;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(best_frame);
			of_node_put(frame);
			return;
		}

		if (cnttidr & CNTTIDR_VIRT(n)) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}
		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	base = arch_counter_base = of_iomap(best_frame, 0);
	if (!base) {
		pr_err("arch_timer: Can't map frame's registers\n");
		of_node_put(best_frame);
		return;
	}

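	/* A frame's interrupts are listed physical first, then virtual. */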
	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);
	of_node_put(best_frame);
	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq",
729
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return;
	}

	arch_timer_detect_rate(base, np);
	arch_timer_mem_register(base, irq);
	arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);