/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)
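
/*
 * Size note: with the values above, each per-cpu BTS buffer holds
 * 2048 records of 24 bytes (48 KiB), and the overflow threshold sits
 * 128 records (3 KiB) before the end of the buffer.
 */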


/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
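
/*
 * Example (hypothetical values): INTEL_EVENT_CONSTRAINT(0x12, 0x3) would
 * describe an event with selector code 0x12 that may only run on generic
 * counters 0 and 1: idxmsk64 is the counter bitmask (0x3 here) and the
 * weight is its population count (2).
 */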

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;

	void		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
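
/*
 * Note on the shift arithmetic above: with 48-bit counters, for example,
 * event_bits == 48 and shift == 16, so both raw counts are shifted up
 * before subtracting and the signed right shift brings the difference
 * back down sign-extended.  The delta is therefore computed modulo the
 * real counter width, independent of how the MSR read pads the top bits.
 */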

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
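
/*
 * The generalized cache event in attr->config decoded above is packed as
 * three bytes: cache id in bits 0-7, operation in bits 8-15 and result in
 * bits 16-23, which together index hw_cache_event_ids[][][], filled in by
 * the model-specific setup code.
 */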

/*
 * Set up the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
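
/*
 * Scheduling example: given one event that is constrained to a single
 * counter (weight 1) and two events that can go anywhere (weight ==
 * wmax), the weight loop above places the constrained event first, so
 * the flexible events fall back to whichever counters remain free.
 */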

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			     (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}
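
/*
 * Example: to get an interrupt after the next 100000 events, the counter
 * is programmed with (u64)-100000 (masked to x86_pmu.event_mask), so it
 * counts upward and overflows exactly 100000 increments later.
 */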

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static int x86_event_sched_in(struct perf_event *event,
			  struct perf_cpu_context *cpuctx)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}

static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 *
 * called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
int hw_perf_group_sched_in(struct perf_event *leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *sub;
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

	ret = x86_event_sched_in(leader, cpuctx);
	if (ret)
		return ret;

	n1 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state > PERF_EVENT_STATE_OFF) {
			ret = x86_event_sched_in(sub, cpuctx);
			if (ret)
				goto undo;
			++n1;
		}
	}
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n0*sizeof(int));

	cpuc->n_events  = n0;
	cpuc->n_added  += n1;
	ctx->nr_active += n1;

	/*
	 * 1 means successful and events are active
	 * This is not quite true because we defer
	 * actual activation until hw_perf_enable() but
	 * this way we ensure caller won't try to enable
	 * individual events
	 */
	return 1;
undo:
	x86_event_sched_out(leader, cpuctx);
	n0  = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
			x86_event_sched_out(sub, cpuctx);
			if (++n0 == n1)
				break;
		}
	}
	return ret;
}

#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_intel.c"

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
				   0, x86_pmu.num_events);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != INTEL_ARCH_FIXED_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
			c->weight += x86_pmu.num_events;
		}
	}

I
	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:             %016Lx\n", perf_event_mask);
1443 1444

	perf_cpu_notifier(x86_pmu_notifier);
I
I
1447
static inline void x86_pmu_read(struct perf_event *event)
1448
{
1449
	x86_perf_event_update(event);
1450 1451
}

1452 1453 1454
static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
1455 1456
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
1457
	.read		= x86_pmu_read,
1458
	.unthrottle	= x86_pmu_unthrottle,
I
Ingo Molnar 已提交
1459 1460
};

1461 1462 1463 1464
/*
 * validate a single event group
 *
 * validation include:
1465 1466 1467
 *	- check events are compatible which each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
1468 1469 1470 1471
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
1472 1473
static int validate_group(struct perf_event *event)
{
1474
	struct perf_event *leader = event->group_leader;
1475 1476
	struct cpu_hw_events *fake_cpuc;
	int ret, n;
1477

1478 1479 1480 1481
	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;
1482

1483 1484 1485 1486 1487 1488
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
1489 1490
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
1491
	if (n < 0)
1492
		goto out_free;
1493

1494 1495
	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
1496
	if (n < 0)
1497
		goto out_free;
1498

1499
	fake_cpuc->n_events = n;
1500

1501 1502 1503 1504 1505 1506
	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
1507 1508
}

1509
const struct pmu *hw_perf_event_init(struct perf_event *event)
I
Ingo Molnar 已提交
1510
{
1511
	const struct pmu *tmp;
I
Ingo Molnar 已提交
1512 1513
	int err;

1514
	err = __hw_perf_event_init(event);
1515
	if (!err) {
1516 1517 1518 1519 1520 1521 1522 1523
		/*
		 * we temporarily connect event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

1524 1525
		if (event->group_leader != event)
			err = validate_group(event);
1526 1527

		event->pmu = tmp;
1528
	}
1529
	if (err) {
1530 1531
		if (event->destroy)
			event->destroy(event);
1532
		return ERR_PTR(err);
1533
	}
I
Ingo Molnar 已提交
1534

1535
	return &pmu;
I
Ingo Molnar 已提交
1536
}
1537 1538 1539 1540 1541 1542

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}
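
/*
 * Separate per-context buffers (pmc_nmi_entry vs. pmc_irq_entry) are used
 * above so that a callchain collected from NMI context cannot clobber one
 * that is being filled in from a normal interrupt.
 */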

void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;
	/*
	 * perf_arch_fetch_caller_regs adds another call, we need to increment
	 * the skip level
	 */
	regs->bp = rewind_frame_pointer(skip + 1);
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
}