perf_counter.c 14.1 KB
Newer Older
I
Ingo Molnar 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
15
#include <linux/module.h>
I
Ingo Molnar 已提交
16 17 18
#include <linux/kdebug.h>
#include <linux/sched.h>

19
#include <asm/perf_counter.h>
I
Ingo Molnar 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
#include <asm/apic.h>

/* Set once the PMU has been probed in init_hw_perf_counters(): */
static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
/* Bitmask with one bit set per available generic counter: */
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS		8

/* Per-CPU bookkeeping: which counter objects occupy which PMC slots: */
struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HW_COUNTERS];
	unsigned long		used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

44
static const int intel_perfmon_event_map[] =
I
Ingo Molnar 已提交
45 46 47 48 49 50 51 52 53
{
  [PERF_COUNT_CYCLES]			= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

54
static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
I
Ingo Molnar 已提交
55

56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	/* Read the hw counter AFTER snapshotting prev, so the cmpxchg
	 * below detects any NMI that raced with us: */
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	/* Consume the elapsed events out of the current IRQ period: */
	atomic64_sub(delta, &hwc->period_left);
}

I
Ingo Molnar 已提交
96 97 98
/*
 * Setup the hardware configuration for a given hw_event_type
 */
I
Ingo Molnar 已提交
99
static int __hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
100
{
I
Ingo Molnar 已提交
101
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
I
Ingo Molnar 已提交
102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
I
Ingo Molnar 已提交
120
		if (hw_event->nmi)
I
Ingo Molnar 已提交
121 122 123
			hwc->nmi = 1;
	}

I
Ingo Molnar 已提交
124 125
	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;
I
Ingo Molnar 已提交
126

I
Ingo Molnar 已提交
127
	hwc->irq_period		= hw_event->irq_period;
I
Ingo Molnar 已提交
128 129 130 131 132
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
133
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
I
Ingo Molnar 已提交
134 135
		hwc->irq_period = 0x7FFFFFFF;

136
	atomic64_set(&hwc->period_left, hwc->irq_period);
I
Ingo Molnar 已提交
137 138

	/*
139
	 * Raw event type provide the config in the event structure
I
Ingo Molnar 已提交
140
	 */
I
Ingo Molnar 已提交
141 142
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
I
Ingo Molnar 已提交
143
	} else {
I
Ingo Molnar 已提交
144
		if (hw_event->type >= max_intel_perfmon_events)
I
Ingo Molnar 已提交
145 146 147 148
			return -EINVAL;
		/*
		 * The generic map:
		 */
I
Ingo Molnar 已提交
149
		hwc->config |= intel_perfmon_event_map[hw_event->type];
I
Ingo Molnar 已提交
150 151 152 153 154 155 156 157
	}
	counter->wakeup_pending = 0;

	return 0;
}

void hw_perf_enable_all(void)
{
158 159 160
	if (unlikely(!perf_counters_initialized))
		return;

161
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
I
Ingo Molnar 已提交
162 163
}

164
u64 hw_perf_save_disable(void)
165 166 167
{
	u64 ctrl;

168 169 170
	if (unlikely(!perf_counters_initialized))
		return 0;

171
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
I
Ingo Molnar 已提交
172
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
173

174
	return ctrl;
I
Ingo Molnar 已提交
175
}
176
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
I
Ingo Molnar 已提交
177

178 179
/*
 * Restore a global-control value previously saved by
 * hw_perf_save_disable():
 */
void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

187
static inline void
188 189
__x86_perf_counter_disable(struct perf_counter *counter,
			   struct hw_perf_counter *hwc, unsigned int idx)
190
{
191 192 193
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
194 195
}

196
/* Per-CPU snapshot of the last period programmed into each PMC slot,
 * kept purely for perf_counter_print_debug(): */
static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
I
Ingo Molnar 已提交
197

198 199 200 201 202 203 204
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
I
Ingo Molnar 已提交
205
{
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	/*
	 * If we are way outside a reasoable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}
I
Ingo Molnar 已提交
221

222 223 224 225 226 227 228 229 230
	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
231 232
}

233 234 235
static void
__x86_perf_counter_enable(struct perf_counter *counter,
			  struct hw_perf_counter *hwc, int idx)
236 237 238
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
I
Ingo Molnar 已提交
239 240
}

241 242 243
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
I
Ingo Molnar 已提交
244
static void x86_perf_counter_enable(struct perf_counter *counter)
I
Ingo Molnar 已提交
245 246 247 248 249 250 251 252 253 254 255 256 257 258
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

259
	__x86_perf_counter_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
260 261

	cpuc->counters[idx] = counter;
262

263 264
	__hw_perf_counter_set_period(counter, hwc, idx);
	__x86_perf_counter_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
265 266 267 268
}

void perf_counter_print_debug(void)
{
269
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
270 271 272 273
	int cpu, idx;

	if (!nr_hw_counters)
		return;
I
Ingo Molnar 已提交
274 275 276 277 278

	local_irq_disable();

	cpu = smp_processor_id();

279 280 281
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
I
Ingo Molnar 已提交
282 283 284 285 286 287 288

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
289 290
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);
I
Ingo Molnar 已提交
291

292
		prev_left = per_cpu(prev_left[idx], cpu);
I
Ingo Molnar 已提交
293 294 295 296 297

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
298 299
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
I
Ingo Molnar 已提交
300 301 302 303
	}
	local_irq_enable();
}

I
Ingo Molnar 已提交
304
static void x86_perf_counter_disable(struct perf_counter *counter)
I
Ingo Molnar 已提交
305 306 307 308 309
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

310
	__x86_perf_counter_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
311 312 313 314

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;

315 316 317 318 319
	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
I
Ingo Molnar 已提交
320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335
}

/*
 * Append one u64 record to the counter's IRQ data buffer; if the
 * buffer is full, drop the record and bump the overrun count:
 */
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
		return;
	}

	*(u64 *)&irqdata->data[irqdata->len] = data;
	irqdata->len += sizeof(u64);
}

336
/*
337 338
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
339
 */
I
Ingo Molnar 已提交
340 341 342 343
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
344
	u64 pmc_ctrl;
I
Ingo Molnar 已提交
345

346
	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
I
Ingo Molnar 已提交
347

348 349
	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);
350 351

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
352
		__x86_perf_counter_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
353 354 355
}

static void
356
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
I
Ingo Molnar 已提交
357
{
358
	struct perf_counter *counter, *group_leader = sibling->group_leader;
I
Ingo Molnar 已提交
359

360
	/*
361
	 * Store sibling timestamps (if any):
362 363
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
364
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
365
		perf_store_irq_data(sibling, counter->hw_event.type);
366
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
I
Ingo Molnar 已提交
367 368 369 370 371 372 373 374 375 376
	}
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
377
	u64 ack, status, saved_global;
I
Ingo Molnar 已提交
378
	struct cpu_hw_counters *cpuc;
379 380

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
I
Ingo Molnar 已提交
381 382 383 384 385 386 387

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

388 389 390 391
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

I
Ingo Molnar 已提交
392 393 394 395 396 397 398 399 400 401 402
again:
	ack = status;
	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

I
Ingo Molnar 已提交
403
		switch (counter->hw_event.record_type) {
I
Ingo Molnar 已提交
404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these counters as
		 * wakeup_pending and initate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
434
out:
I
Ingo Molnar 已提交
435
	/*
436
	 * Restore - do not reenable when global enable is off:
I
Ingo Molnar 已提交
437
	 */
438
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
I
Ingo Molnar 已提交
439 440 441 442 443
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
444
	inc_irq_stat(apic_perf_irqs);
I
Ingo Molnar 已提交
445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	/* Deliver the wakeups the NMI handler could not do itself: */
	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		if (counter && counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

/*
 * Route PMC overflows through the local APIC, either as a regular
 * vectored interrupt or as an NMI. LVTERR is masked around the
 * update to suppress spurious error interrupts:
 */
void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 lvterr;

	if (!perf_counters_initialized)
		return;

	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	lvterr = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, lvterr | APIC_LVT_MASKED);

	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);

	apic_write(APIC_LVTERR, lvterr);
}

/*
 * Die-chain callback: claim PMC overflow NMIs and hand them to the
 * common interrupt handler in NMI mode:
 */
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	/* Re-arm the LVT entry for the next overflow NMI: */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(args->regs, 1);

	return NOTIFY_STOP;
}

/* Die-chain registration, so PMC overflow NMIs reach the handler above: */
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

/*
 * Probe the architectural PMU via CPUID leaf 10, size the generic
 * counter pool, and hook up the APIC vector and NMI notifier.
 *
 * Fix: the WARN about too many hw counters used to fire AFTER
 * nr_hw_counters was clipped, so the "%d > max(%d)" message always
 * printed the clipped value against itself. Warn first, clip after.
 */
void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > MAX_HW_COUNTERS) {
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, MAX_HW_COUNTERS);
		nr_hw_counters = MAX_HW_COUNTERS;
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
I
Ingo Molnar 已提交
558

559 560 561 562 563
/* Sync the hw counter's current raw value into the generic count: */
static void x86_perf_counter_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

564
static const struct hw_perf_counter_ops x86_perf_counter_ops = {
I
Ingo Molnar 已提交
565 566 567 568 569
	.hw_perf_counter_enable		= x86_perf_counter_enable,
	.hw_perf_counter_disable	= x86_perf_counter_disable,
	.hw_perf_counter_read		= x86_perf_counter_read,
};

570 571
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
572 573 574 575 576 577 578 579 580
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}