#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static struct event_constraint *amd_nb_event_constraint;

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 *
 * CPUs with north bridge performance counter extensions:
 *   4 additional counters starting at 0xc0010240 each offset by 2
 *   (indexed right above either one of the above core counters)
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset, first, base;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (amd_nb_event_constraint &&
	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
		/*
		 * calculate the offset of NB counters with respect to
		 * base eventsel or perfctr
		 */

		first = find_first_bit(amd_nb_event_constraint->idxmsk,
				       X86_PMC_IDX_MAX);

		if (eventsel)
			base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
		else
			base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;

		offset = base + ((index - first) << 1);
	} else if (!cpu_has_perfctr_core)
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
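
/*
 * Worked example (illustrative, derived from the layout comment above):
 * with core performance counter extensions, the eventsel MSR for counter
 * index 3 is
 *   x86_pmu.eventsel + amd_pmu_addr_offset(3, true)
 *     = 0xc0010200 + (3 << 1) = 0xc0010206
 * whereas on a legacy CPU it is 0xc0010000 + 3 = 0xc0010003.
 */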

static inline int amd_pmu_rdpmc_index(int index)
{
	int ret, first;

	if (!index)
		return index;

	ret = rdpmc_indexes[index];

	if (ret)
		return ret;

	if (amd_nb_event_constraint &&
	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
		/*
		 * according to the manual, ECX value of the NB counters is
		 * the index of the NB counter (0, 1, 2 or 3) plus 6
		 */

		first = find_first_bit(amd_nb_event_constraint->idxmsk,
				       X86_PMC_IDX_MAX);
		ret = index - first + 6;
	} else
		ret = index;

	rdpmc_indexes[index] = ret;

	return ret;
}
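
/*
 * Illustrative example: without core perfctr extensions the NB constraint
 * mask is amd_NBPMC74 (counters 4-7, see below), so NB counter index 5 maps
 * to rdpmc index 5 - 4 + 6 = 7.
 */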

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

/*
 * NB counters do not support the following event select bits:
 *   Host/Guest only
 *   Counter mask
 *   Invert counter mask
 *   Edge detect
 *   OS/User mode
 */
static int amd_nb_hw_config(struct perf_event *event)
{
	/* for NB, we only allow system wide counting mode */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
			      ARCH_PERFMON_EVENTSEL_OS);

	if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
				 ARCH_PERFMON_EVENTSEL_INT))
		return -EINVAL;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
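
/*
 * Example (illustrative): the 12-bit AMD event code is split across config
 * bits [7:0] and [35:32].  For a raw config of 0x1000000d6 (event 0x1D6),
 * the expression above yields
 *   ((0x1000000d6 >> 24) & 0x0f00) | (0x1000000d6 & 0xff) = 0x100 | 0xd6 = 0x1d6.
 */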

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
{
	return amd_nb_event_constraint && amd_is_nb_event(hwc);
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	if (amd_is_perfctr_nb_event(&event->hw))
		return amd_nb_hw_config(event);

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
{
	int core_id = cpu_data(smp_processor_id()).cpu_core_id;

	/* deliver interrupts only to this core */
	if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
		hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
		hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
		hwc->config |= (u64)(core_id) <<
			AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
	}
}

 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12
  *
  * NB events are events measuring L3 cache, Hypertransport
  * traffic. They are identified by an event code >= 0xe00.
  * They measure events on the NorthBridge which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When a NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the same
  * counters to host NB events, this is why we use atomic ops. Some
  * multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling __amd_put_nb_event_constraints()
  *
  * Non NB events are not impacted by this restriction.
  */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	if (amd_is_perfctr_nb_event(hwc))
		amd_nb_interrupt_hw_config(hwc);

	return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

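	/* share the amd_nb of an already-online core on the same northbridge, if any */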
	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event,
					      amd_nb_event_constraint);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
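
/*
 * Note (illustrative): the constraint bitmasks above encode the PERF_CTL
 * ranges from the table, e.g. amd_f15_PMC53 = 0x38 selects PERF_CTL[5:3]
 * and amd_f15_PMC20 = 0x07 selects PERF_CTL[2:0].
 */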

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		return __amd_get_nb_event_constraints(cpuc, event,
						      amd_nb_event_constraint);
	default:
		return &emptyconstraint;
	}
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset            = amd_pmu_addr_offset,
	.rdpmc_index		= amd_pmu_rdpmc_index,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

static int setup_event_constraints(void)
{
	if (boot_cpu_data.x86 == 0x15)
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
	return 0;
}

static int setup_perfctr_core(void)
{
	if (!cpu_has_perfctr_core) {
		WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
		     KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
		return -ENODEV;
	}

	WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
	     KERN_ERR "hw perf events core counters need constraints handler!");

	/*
	 * If core performance counter extensions exist, we must use
	 * the MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR MSRs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;

	printk(KERN_INFO "perf: AMD core performance counters detected\n");

	return 0;
}

static int setup_perfctr_nb(void)
{
	if (!cpu_has_perfctr_nb)
		return -ENODEV;

	x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;

	if (cpu_has_perfctr_core)
		amd_nb_event_constraint = &amd_NBPMC96;
	else
		amd_nb_event_constraint = &amd_NBPMC74;

	printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");

	return 0;
}

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	setup_event_constraints();
	setup_perfctr_core();
	setup_perfctr_nb();

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);