#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

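/*
 * Decode a PERF_TYPE_HW_CACHE config into a hardware event number via the
 * per-PMU cache map. The config is encoded as:
 *
 *	config = (cache_type) | (cache_op << 8) | (cache_result << 16)
 *
 * e.g. an L1D read miss is PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */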
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

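/*
 * (Re)program the period for an event: the counter is preloaded with -left so
 * that it overflows once the remaining 'left' events of the sample period
 * have occurred.
 */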
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

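/*
 * Read the counter, fold the delta since the last read into event->count and
 * period_left, and return the new raw value. The cmpxchg loop keeps
 * prev_count consistent if we race with another reader or an interrupt.
 */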
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

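/*
 * pmu::add callback: claim a hardware counter for the event on this CPU,
 * record it in the per-CPU pmu_hw_events and, if PERF_EF_START is set,
 * program the period and enable the counter immediately.
 */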
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

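/*
 * Group validation helpers: validate_group() simulates scheduling the whole
 * group onto a fake pmu_hw_events (only used_mask is initialised), while
 * validate_event() asks the back-end's get_event_idx() whether each hardware
 * member of the group could get a counter at the same time.
 */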
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

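/*
 * Common PMU interrupt entry point. It resolves the struct arm_pmu from the
 * (possibly per-CPU) dev pointer, lets platform code wrap the driver's
 * handle_irq if it wants to, and reports the time spent handling the
 * interrupt to the perf core via perf_sample_event_took().
 */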
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting; we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

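/*
 * Set up the hw_perf_event for a new event: resolve the event encoding via
 * map_event(), check mode-exclusion support, pick a default sample period
 * for counting (non-sampling) events and validate the event's group.
 */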
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

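/*
 * Request the PMU interrupt for one CPU. A per-CPU interrupt (PPI) is
 * requested once for all CPUs via request_percpu_irq(); a regular per-CPU
 * SPI is forced onto the target CPU and requested with request_irq(), using
 * platform-supplied irq_flags when available.
 */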
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);
	if (!irq)
		return 0;

	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
			goto err_out;
		}
	} else {
		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		if (platdata && platdata->irq_flags) {
			irq_flags = platdata->irq_flags;
		} else {
			irq_flags = IRQF_PERCPU |
				    IRQF_NOBALANCING |
				    IRQF_NO_THREAD;
		}

		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err)
		goto err_out;

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu(irq)) {
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
			return 0;
		}
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used, skip it; there is no
		 * need to stop/restart it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			 /*
			  * Restore and enable the counter.
			  * armpmu_start() indirectly calls
			  *
			  * perf_event_update_userpage()
			  *
			  * which requires RCU read locking to be functional;
			  * wrap the call within RCU_NONIDLE to make the
			  * RCU subsystem aware this CPU is not idle from
			  * an RCU perspective for the duration of the
			  * armpmu_start() call.
			  */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

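/*
 * Allocate an arm_pmu together with its per-CPU pmu_hw_events and fill in
 * the generic struct pmu callbacks; the PMU driver then only has to provide
 * the hardware-specific hooks (enable, disable, read_counter, ...).
 */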
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

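/*
 * Register a fully initialised arm_pmu with the CPU hotplug / CPU PM
 * machinery and then with the perf core. The first PMU registered is also
 * exported through perf_pmu_name()/perf_num_counters() for OProfile-style
 * users.
 */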
int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);