/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS		8

struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HW_COUNTERS];
	unsigned long		used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

44
static const int intel_perfmon_event_map[] =
I
Ingo Molnar 已提交
45 46 47 48 49 50 51 52 53
{
  [PERF_COUNT_CYCLES]			= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

54
static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
I
Ingo Molnar 已提交
55

56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/* Must only run while the counter is scheduled in on this CPU. */
	WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	/* Retry if an NMI updated prev_count between the read and cmpxchg. */
	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
	WARN_ON_ONCE((int)delta < 0);

	/* Fold the elapsed count into the generic counter and consume
	 * it from the remaining IRQ period: */
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

I
Ingo Molnar 已提交
98 99 100
/*
 * Setup the hardware configuration for a given hw_event_type
 */
I
Ingo Molnar 已提交
101
static int __hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
102
{
I
Ingo Molnar 已提交
103
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
I
Ingo Molnar 已提交
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
I
Ingo Molnar 已提交
122
		if (hw_event->nmi)
I
Ingo Molnar 已提交
123 124 125
			hwc->nmi = 1;
	}

I
Ingo Molnar 已提交
126 127
	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;
I
Ingo Molnar 已提交
128

I
Ingo Molnar 已提交
129
	hwc->irq_period		= hw_event->irq_period;
I
Ingo Molnar 已提交
130 131 132 133 134
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
135
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
I
Ingo Molnar 已提交
136 137
		hwc->irq_period = 0x7FFFFFFF;

138
	atomic64_set(&hwc->period_left, hwc->irq_period);
I
Ingo Molnar 已提交
139 140

	/*
141
	 * Raw event type provide the config in the event structure
I
Ingo Molnar 已提交
142
	 */
I
Ingo Molnar 已提交
143 144
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
I
Ingo Molnar 已提交
145
	} else {
I
Ingo Molnar 已提交
146
		if (hw_event->type >= max_intel_perfmon_events)
I
Ingo Molnar 已提交
147 148 149 150
			return -EINVAL;
		/*
		 * The generic map:
		 */
I
Ingo Molnar 已提交
151
		hwc->config |= intel_perfmon_event_map[hw_event->type];
I
Ingo Molnar 已提交
152 153 154 155 156 157 158 159
	}
	counter->wakeup_pending = 0;

	return 0;
}

void hw_perf_enable_all(void)
{
160 161 162
	if (unlikely(!perf_counters_initialized))
		return;

163
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
I
Ingo Molnar 已提交
164 165
}

166
u64 hw_perf_save_disable(void)
167 168 169
{
	u64 ctrl;

170 171 172
	if (unlikely(!perf_counters_initialized))
		return 0;

173
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
I
Ingo Molnar 已提交
174
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
175

176
	return ctrl;
I
Ingo Molnar 已提交
177
}
178
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
I
Ingo Molnar 已提交
179

180 181
void hw_perf_restore(u64 ctrl)
{
182 183 184
	if (unlikely(!perf_counters_initialized))
		return;

185 186 187 188
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

189
static inline void
190 191
__x86_perf_counter_disable(struct perf_counter *counter,
			   struct hw_perf_counter *hwc, unsigned int idx)
192
{
193 194 195 196
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
	WARN_ON_ONCE(err);
197 198
}

199
static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
I
Ingo Molnar 已提交
200

201 202 203 204 205 206 207
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
I
Ingo Molnar 已提交
208
{
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	WARN_ON_ONCE(period <= 0);

	/*
	 * If we are way outside a reasoable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}
I
Ingo Molnar 已提交
226

227 228 229 230 231 232 233 234 235 236 237
	WARN_ON_ONCE(left <= 0);

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
238 239
}

240 241 242
static void
__x86_perf_counter_enable(struct perf_counter *counter,
			  struct hw_perf_counter *hwc, int idx)
243 244 245
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
I
Ingo Molnar 已提交
246 247
}

248 249 250
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
I
Ingo Molnar 已提交
251
static void x86_perf_counter_enable(struct perf_counter *counter)
I
Ingo Molnar 已提交
252 253 254 255 256 257 258 259 260 261 262 263 264 265
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

266
	__x86_perf_counter_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
267 268

	cpuc->counters[idx] = counter;
269

270 271
	__hw_perf_counter_set_period(counter, hwc, idx);
	__x86_perf_counter_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
272 273 274 275
}

void perf_counter_print_debug(void)
{
276
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
277 278 279 280
	int cpu, idx;

	if (!nr_hw_counters)
		return;
I
Ingo Molnar 已提交
281 282 283 284 285

	local_irq_disable();

	cpu = smp_processor_id();

286 287 288
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
I
Ingo Molnar 已提交
289 290 291 292 293 294 295

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
296 297
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);
I
Ingo Molnar 已提交
298

299
		prev_left = per_cpu(prev_left[idx], cpu);
I
Ingo Molnar 已提交
300 301 302 303 304

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
305 306
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
I
Ingo Molnar 已提交
307 308 309 310
	}
	local_irq_enable();
}

I
Ingo Molnar 已提交
311
static void x86_perf_counter_disable(struct perf_counter *counter)
I
Ingo Molnar 已提交
312 313 314 315 316
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

317
	__x86_perf_counter_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
318 319 320 321

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;

322 323 324 325 326
	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
I
Ingo Molnar 已提交
327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342
}

/*
 * Append one u64 record to the counter's IRQ data buffer,
 * counting an overrun when the buffer is full.
 */
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
		return;
	}

	*(u64 *)&irqdata->data[irqdata->len] = data;
	irqdata->len += sizeof(u64);
}

343
/*
344 345
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
346
 */
I
Ingo Molnar 已提交
347 348 349 350
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
351
	u64 pmc_ctrl;
I
Ingo Molnar 已提交
352

353
	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
I
Ingo Molnar 已提交
354

355 356
	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);
357 358

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
359
		__x86_perf_counter_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
360 361 362
}

static void
363
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
I
Ingo Molnar 已提交
364
{
365
	struct perf_counter *counter, *group_leader = sibling->group_leader;
I
Ingo Molnar 已提交
366

367
	/*
368
	 * Store sibling timestamps (if any):
369 370
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
371
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
372
		perf_store_irq_data(sibling, counter->hw_event.type);
373
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
I
Ingo Molnar 已提交
374 375 376 377 378 379 380 381 382 383
	}
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
384
	u64 ack, status, saved_global;
I
Ingo Molnar 已提交
385
	struct cpu_hw_counters *cpuc;
386 387

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
I
Ingo Molnar 已提交
388 389 390 391 392 393 394

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

395 396 397 398
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

I
Ingo Molnar 已提交
399 400 401 402 403 404 405 406 407 408 409
again:
	ack = status;
	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

I
Ingo Molnar 已提交
410
		switch (counter->hw_event.record_type) {
I
Ingo Molnar 已提交
411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these counters as
		 * wakeup_pending and initate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
441
out:
I
Ingo Molnar 已提交
442
	/*
443
	 * Restore - do not reenable when global enable is off:
I
Ingo Molnar 已提交
444
	 */
445
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
I
Ingo Molnar 已提交
446 447 448 449 450
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
451
	inc_irq_stat(apic_perf_irqs);
I
Ingo Molnar 已提交
452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	/* Walk this CPU's in-use counters with interrupts off. */
	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		/* Deliver the wakeup that NMI context had to defer. */
		if (counter && counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

/*
 * Program the local APIC LVT performance-counter entry to deliver
 * either an NMI (nmi != 0) or the dedicated perf IRQ vector.
 */
void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	/* Mask LVTERR while reprogramming LVTPC, then restore it. */
	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

/*
 * Die-notifier callback: handle PMC overflow NMIs, pass on
 * everything else.
 */
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	/* Re-arm the LVT entry in NMI mode, then process overflows. */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(args->regs, 1);

	return NOTIFY_STOP;
}

/* Hook PMC overflow handling into the NMI die-notifier chain. */
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

/*
 * Detect and initialize architectural performance monitoring support
 * at boot: query CPUID leaf 10, size the counter arrays, and register
 * the APIC vector and NMI notifier.
 */
void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > MAX_HW_COUNTERS) {
		nr_hw_counters = MAX_HW_COUNTERS;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, MAX_HW_COUNTERS);
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
I
Ingo Molnar 已提交
565

566 567 568 569 570
static void x86_perf_counter_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

571
static const struct hw_perf_counter_ops x86_perf_counter_ops = {
I
Ingo Molnar 已提交
572 573 574 575 576
	.hw_perf_counter_enable		= x86_perf_counter_enable,
	.hw_perf_counter_disable	= x86_perf_counter_disable,
	.hw_perf_counter_read		= x86_perf_counter_read,
};

577 578
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
579 580 581 582 583 584 585 586 587
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}