#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>

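/*
 * Map a generic PERF_TYPE_HW_CACHE config (cache type, op and result packed
 * into the low three bytes) onto a PMU-specific event number via the
 * per-PMU cache map.
 */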
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

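/* Map a generic PERF_TYPE_HARDWARE event onto a PMU-specific event number. */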
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

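/* Raw events are passed straight through, masked to the PMU's event space. */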
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

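/*
 * Translate a perf_event's type and config into a PMU-specific event number,
 * dispatching on the event type (raw, hardware or hw-cache).
 */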
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

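/*
 * Reprogram the counter for the current sample period: the counter is
 * preloaded with -left so that the overflow interrupt fires once the
 * remaining period has elapsed.
 */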
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

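/*
 * Read the hardware counter, compute the delta since the last read (masked
 * to the counter width) and fold it into the event count and the remaining
 * sample period.
 */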
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

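/* Stop an event and release the counter it was using. */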
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

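/* Allocate a counter for an event and, if requested, start it counting. */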
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

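/*
 * Check whether an event could take a counter on the given (possibly fake)
 * PMU. Software events and events that are not going to be scheduled
 * trivially fit.
 */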
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

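/*
 * Common PMU interrupt entry point: route the IRQ through the platform
 * handler if one is provided, otherwise directly to the PMU driver, and
 * report how long handling took so perf can throttle the sample rate.
 */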
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	if (irq_is_percpu(irq))
		dev = *(void **)dev;
	armpmu = dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, dev);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

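/* Drop a reference to the PMU hardware, releasing it on the last user. */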
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

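/* Map the event and set up its hw_perf_event state prior to scheduling. */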
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

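/*
 * pmu::event_init callback: reject events this PMU cannot handle, reserve
 * the hardware on first use and initialise the per-event state.
 */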
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

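/* Initialise the common state and register the PMU with the perf core. */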
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}