/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>
/* Set once PMU detection in init_hw_perf_counters() succeeded. */
static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
/* Bitmask of all usable counter slots: generic bits plus fixed bits. */
static u64 perf_counter_mask __read_mostly;
/* Mask covering the physical bit width of a counter register. */
static u64 counter_value_mask __read_mostly;

/* Number of fixed-function counters reported by CPUID leaf 10 (edx). */
static int nr_counters_fixed __read_mostly;
/*
 * Per-CPU state for the hardware counters currently owned by this CPU.
 */
struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];	/* slot -> active counter */
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; /* slot allocation bitmap */
	unsigned long		interrupts;	/* PMU interrupts since last unthrottle */
	u64			global_enable;	/* saved MSR_CORE_PERF_GLOBAL_CTRL */
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

/*
 * Map the generic PERF_COUNT_* event ids onto the Intel architectural
 * PerfMon event-select/umask encodings.
 */
static const int intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

/* Number of generic event ids the map above can translate. */
static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	/* Retry if an NMI updated prev_count between the read and the MSR read. */
	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

/*
 * Setup the hardware configuration for a given hw_event_type.
 * Returns 0 on success, -EINVAL if the PMU is not initialized or the
 * requested generic event id is out of range.
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period		= hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
		hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event types provide the config directly in the event
	 * structure; otherwise translate via the generic event map.
	 */
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
	} else {
		if (hw_event->type >= max_intel_perfmon_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= intel_perfmon_event_map[hw_event->type];
	}
	counter->wakeup_pending = 0;

	return 0;
}

159
u64 hw_perf_save_disable(void)
160 161 162
{
	u64 ctrl;

163 164 165
	if (unlikely(!perf_counters_initialized))
		return 0;

166
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
167
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
168

169
	return ctrl;
I
Ingo Molnar 已提交
170
}
171
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
I
Ingo Molnar 已提交
172

173 174
void hw_perf_restore(u64 ctrl)
{
175 176 177
	if (unlikely(!perf_counters_initialized))
		return;

178
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
179 180 181
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
static inline void
__pmc_fixed_disable(struct perf_counter *counter,
		    struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

197
static inline void
198
__pmc_generic_disable(struct perf_counter *counter,
199
			   struct hw_perf_counter *hwc, unsigned int idx)
200
{
201
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
202 203 204
		__pmc_fixed_disable(counter, hwc, idx);
	else
		wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
205 206
}

/*
 * Last value programmed as "events left until overflow", per CPU and
 * per counter slot; read back by perf_counter_print_debug().
 */
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	/* Remember what we programmed, for perf_counter_print_debug(). */
	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	/* Program the counter so it overflows after 'left' more events. */
	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & counter_value_mask);
}

static inline void
__pmc_fixed_enable(struct perf_counter *counter,
		   struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
255 256 257
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
258
	 */
259 260 261
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
262 263 264 265 266 267 268 269 270
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
271 272
}

273
static void
274
__pmc_generic_enable(struct perf_counter *counter,
275
			  struct hw_perf_counter *hwc, int idx)
276
{
277
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
278 279 280 281
		__pmc_fixed_enable(counter, hwc, idx);
	else
		wrmsr(hwc->config_base + idx,
		      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
I
Ingo Molnar 已提交
282 283
}

/*
 * Map a counter's config onto one of the Intel fixed-function counter
 * slots; returns -1 if no fixed slot matches this event.
 */
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	/* NMI-driven counters are kept on generic PMCs. */
	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 * prefer a matching fixed-function slot, fall back to any free
 * generic PMC.  Returns 0 on success, -EAGAIN if all slots are taken.
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
			if (idx == nr_counters_generic)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = MSR_ARCH_PERFMON_EVENTSEL0;
		hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
	}

	perf_counters_lapic_init(hwc->nmi);

	/* Program the slot disabled first; enable only after the period is set. */
	__pmc_generic_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;
	/*
	 * Make it visible before enabling the hw:
	 */
	smp_wmb();

	__hw_perf_counter_set_period(counter, hwc, idx);
	__pmc_generic_enable(counter, hwc, idx);

	return 0;
}

/*
 * Dump the raw PMU register state of the current CPU to the kernel log.
 * Runs with local interrupts disabled to keep the snapshot consistent.
 */
void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	/* Nothing to print when no architectural PMU was detected. */
	if (!nr_counters_generic)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);
	printk(KERN_INFO "CPU#%d: fixed:      %016llx\n", cpu, fixed);
	printk(KERN_INFO "CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < nr_counters_generic; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		printk(KERN_INFO "CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		printk(KERN_INFO "CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < nr_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

/*
 * Remove a counter from the PMU: stop the hardware counter, release
 * its slot and fold the final delta into the generic count.
 */
static void pmc_generic_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__pmc_generic_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;
	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	smp_wmb();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

/*
 * Append one u64 record to the counter's IRQ data buffer; if the
 * buffer is full, drop the record and count the overrun instead.
 */
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
		return;
	}

	*(u64 *)&irqdata->data[irqdata->len] = data;
	irqdata->len += sizeof(u64);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Fold the elapsed delta in, then program the next period. */
	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	/* Only re-enable counters that are still active. */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		__pmc_generic_enable(counter, hwc, idx);
}

/*
 * Record the current counts of all siblings in an overflowing
 * counter group into the leader's IRQ data buffer.
 * NOTE(review): 'status' and 'overflown' are currently unused here.
 */
static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	/*
	 * Store sibling timestamps (if any):
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {

		/* Refresh the sibling's count, then emit (type, count) pairs. */
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
		perf_store_irq_data(sibling, counter->hw_event.type);
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
	}
}

/*
 * Maximum interrupt frequency of 100KHz per CPU.
 * Parenthesized so the expansion survives surrounding operators
 * (e.g. "x * PERFMON_MAX_INTERRUPTS" would otherwise misparse).
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

	/* Save the global enable state so we can restore it on exit. */
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

	/* Disable counters globally */
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	ack_APIC_irq();

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	/* Handle every counter whose overflow bit is set in the status word. */
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these generic as
		 * wakeup_pending and initiate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	/* Acknowledge the overflow bits handled in this pass. */
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}

/*
 * Reset the interrupt-throttle state: if this CPU exceeded
 * PERFMON_MAX_INTERRUPTS, warn and re-enable its counters.
 */
void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;
	u64 global_enable;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	if (unlikely(!perf_counters_initialized))
		return;

	cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
	}
	/* Re-sync in case the interrupt handler left counters disabled. */
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
	if (unlikely(cpuc->global_enable && !global_enable))
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
	cpuc->interrupts = 0;
}

/* IRQ-vector entry point (non-NMI delivery) for PMU overflow interrupts. */
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	/* Rewrite LVTPC (presumably to clear the auto-mask set on delivery). */
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts: deliver the wakeups that
 * the NMI handler could only mark as pending.
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	/* Walk the in-use slots and wake any counter with a pending wakeup. */
	for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		if (!counter)
			continue;

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

/*
 * Program the local APIC LVTPC entry to deliver PMU overflow events,
 * either as the local perf vector or as an NMI.
 */
void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	/* Mask LVTERR while reprogramming LVTPC, then restore it. */
	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

/*
 * die-notifier callback: handle perf-counter NMIs, ignore everything
 * else on the chain.
 */
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	/* Rewrite LVTPC for NMI delivery, then handle the overflow. */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(args->regs, 1);

	return NOTIFY_STOP;
}

/* NMI die-notifier registration; priority 1 runs it early on the chain. */
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

/*
 * Boot-time PMU detection: read CPUID leaf 10 to size the generic and
 * fixed counter banks, build perf_counter_mask and hook up the local
 * APIC vector plus the NMI die-notifier.
 */
void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int ebx;
	unsigned int unused;
	union cpuid10_edx edx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
	printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
	nr_counters_generic = eax.split.num_counters;
	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
		nr_counters_generic = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_counters_generic, X86_PMC_MAX_GENERIC);
	}
	/*
	 * Use a 64-bit shift: perf_counter_mask is u64, and a plain
	 * "1 <<" would truncate (and be undefined) for >= 32 counters.
	 */
	perf_counter_mask = (1ULL << nr_counters_generic) - 1;
	perf_max_counters = nr_counters_generic;

	printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
	counter_value_mask = (1ULL << eax.split.bit_width) - 1;
	printk(KERN_INFO "... value mask:      %016Lx\n", counter_value_mask);

	printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);

	nr_counters_fixed = edx.split.num_counters_fixed;
	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
			nr_counters_fixed, X86_PMC_MAX_FIXED);
	}
	printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);

	/* Fixed counters occupy the mask bits starting at X86_PMC_IDX_FIXED. */
	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
I
Ingo Molnar 已提交
711

712
static void pmc_generic_read(struct perf_counter *counter)
713 714 715 716
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

/*
 * Architecture-level counter operations handed to the generic
 * perf_counter core.
 */
static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.enable		= pmc_generic_enable,
	.disable	= pmc_generic_disable,
	.read		= pmc_generic_read,
};

723 724
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
725 726 727 728 729 730 731 732 733
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}