#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

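/*
 * Decode a PERF_TYPE_HW_CACHE config value: the cache type, operation and
 * result are packed into the low three bytes and looked up in the PMU
 * driver's cache map.
 */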
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

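/* Map a generic PERF_TYPE_HARDWARE event number via the driver's event map. */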
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

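/* Raw events are passed straight through, masked to the PMU's event field. */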
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

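/*
 * Map an event's type/config pair onto a hardware event number, dispatching
 * to the raw, hardware or cache map as appropriate.
 */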
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

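/*
 * Reprogram the counter for the remainder of the sample period: the counter
 * counts up, so it is preloaded with the negated period and overflows once
 * the period has elapsed.
 */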
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

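/*
 * Read the counter and fold the delta since the last read into the event
 * count, using a cmpxchg loop so a racing update cannot be lost.
 */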
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

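/* Take an event off its counter, updating the count one last time. */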
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

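/* Allocate a hardware counter for the event and, if requested, start it. */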
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

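/*
 * Check that an event and its group siblings can all be scheduled at once,
 * by replaying counter allocation against a fake pmu_hw_events.
 */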
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

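/*
 * Common PMU interrupt entry point: time the low-level handler so the perf
 * core can throttle sampling, and let platform code wrap the handler.
 */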
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

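/* Drop a reference to the PMU hardware, releasing the IRQs on the last put. */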
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events	 = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

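/* Validate the event attributes and set up the initial hw_perf_event state. */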
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

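/*
 * pmu::event_init callback: check the event is something this PMU can
 * handle, reserve the hardware on first use, then do the per-event setup.
 */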
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
	};
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

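/* Free the PMU interrupts: either a single percpu IRQ or one SPI per CPU. */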
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}

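/*
 * Request the PMU interrupts: either a single percpu IRQ shared by all CPUs,
 * or one SPI per CPU with its affinity set to that CPU.
 */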
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return NOTIFY_DONE;

	if (pmu->reset)
		pmu->reset(pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

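/*
 * Set up the per-CPU state for a CPU PMU and register the hotplug notifier
 * that resets the PMU when a CPU comes back online.
 */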
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		goto out_hw_events;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events	= cpu_hw_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
			 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return 0;

out_hw_events:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

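/*
 * Parse the optional "interrupt-affinity" DT property to discover which CPUs
 * this PMU covers and, when SPIs are used, which IRQ targets which CPU.
 */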
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq >= 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
					dn->name);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources)
				break;

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, claim to support all CPUs */
	if (cpumask_weight(&pmu->supported_cpus) == 0)
		cpumask_setall(&pmu->supported_cpus);

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}

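/*
 * Common probe path for CPU PMU drivers: match the DT node against of_table
 * where possible, otherwise fall back to probing the current CPU's ID.
 */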
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu, probe_table);
		cpumask_setall(&pmu->supported_cpus);
	}

	if (ret) {
		pr_info("failed to probe PMU!\n");
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	pr_info("enabled with %s PMU driver, %d counters available\n",
			pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("failed to register PMU devices!\n");
	kfree(pmu);
	return ret;
}