/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

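/*
 * Base RDPMC counter indices: on these parts the NB counters sit after
 * the six core counters in RDPMC index space, and the LLC counters
 * follow the four NB counters.
 */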
#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);

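/*
 * One amd_uncore instance per shared NB / last-level-cache domain: "id"
 * identifies the domain, "refcnt" counts the online CPUs sharing it, and
 * "cpu" is the CPU that carries the events for the whole domain.
 */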
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

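/*
 * Read the counter and accumulate the delta since the last read. The
 * left/right shifts by COUNTER_SHIFT discard bits above the 48-bit
 * counter width, so the delta stays correct across a counter wrap.
 */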
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

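/*
 * Claim a free counter slot with cmpxchg(); slot i maps to the CTL/CTR
 * MSR pair at msr_base + 2 * i and to RDPMC index rdpmc_base + i.
 */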
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {    \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

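/*
 * Hotplug "prepare" stage: allocation may sleep here. Each CPU gets its
 * own tentative amd_uncore structures; CPUs that turn out to share an
 * NB or LLC are merged later in the "starting" callback.
 */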
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

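/*
 * If an online CPU already carries an uncore with the same id, adopt it
 * and bump its refcnt; the now-unused duplicate is queued on
 * uncore_unused_list for uncore_clean_online() to free later.
 */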
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

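/*
 * Runs on the hotplugged CPU itself: the NB id comes from CPUID leaf
 * 0x8000001e (ECX[7:0]), the LLC id from the APIC id and the sharing
 * count of the last cache-topology leaf; then merge with any online
 * sibling that reports the same id.
 */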
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared, subleaf, prev_eax = 0;

		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		/*
		 * Iterate over Cache Topology Definition leaves until no
		 * more cache descriptions are available.
		 */
		for (subleaf = 0; subleaf < 5; subleaf++) {
			cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx);

			/* EAX[0:4] gives type of cache */
			if (!(eax & 0x1f))
				break;

			prev_eax = eax;
		}
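		/* EAX[25:14]: number of cores sharing the cache, minus 1 */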
		nshared = ((prev_eax >> 14) & 0xfff) + 1;

		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

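/* free the duplicates queued up by amd_uncore_find_online_sibling() */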
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

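/*
 * Drop this CPU's reference on the shared structure; the last CPU in
 * the domain to go down frees it.
 */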
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

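/*
 * Detect NB/DF and L2/L3 counter support, name the PMUs by family,
 * register them, and install the CPU hotplug callbacks.
 */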
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17) {
		/*
		 * For F17h, the Northbridge counters are repurposed as Data
		 * Fabric counters. L3 counters are also supported. The PMUs
		 * are exported based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);