/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
27 28
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
I
Ingo Molnar 已提交
29

30
static int nr_counters_fixed __read_mostly;
31

I
Ingo Molnar 已提交
32
struct cpu_hw_counters {
33 34
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
I
Ingo Molnar 已提交
35 36 37 38 39 40 41
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

42
static const int intel_perfmon_event_map[] =
I
Ingo Molnar 已提交
43 44 45 46 47 48 49 50 51
{
  [PERF_COUNT_CYCLES]			= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

52
static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
I
Ingo Molnar 已提交
53

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

I
Ingo Molnar 已提交
94 95 96
/*
 * Setup the hardware configuration for a given hw_event_type
 */
I
Ingo Molnar 已提交
97
static int __hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
98
{
I
Ingo Molnar 已提交
99
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
I
Ingo Molnar 已提交
100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
I
Ingo Molnar 已提交
118
		if (hw_event->nmi)
I
Ingo Molnar 已提交
119 120 121
			hwc->nmi = 1;
	}

I
Ingo Molnar 已提交
122 123
	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;
I
Ingo Molnar 已提交
124

I
Ingo Molnar 已提交
125
	hwc->irq_period		= hw_event->irq_period;
I
Ingo Molnar 已提交
126 127 128 129 130
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
131
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
I
Ingo Molnar 已提交
132 133
		hwc->irq_period = 0x7FFFFFFF;

134
	atomic64_set(&hwc->period_left, hwc->irq_period);
I
Ingo Molnar 已提交
135 136

	/*
137
	 * Raw event type provide the config in the event structure
I
Ingo Molnar 已提交
138
	 */
I
Ingo Molnar 已提交
139 140
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
I
Ingo Molnar 已提交
141
	} else {
I
Ingo Molnar 已提交
142
		if (hw_event->type >= max_intel_perfmon_events)
I
Ingo Molnar 已提交
143 144 145 146
			return -EINVAL;
		/*
		 * The generic map:
		 */
I
Ingo Molnar 已提交
147
		hwc->config |= intel_perfmon_event_map[hw_event->type];
I
Ingo Molnar 已提交
148 149 150 151 152 153 154 155
	}
	counter->wakeup_pending = 0;

	return 0;
}

void hw_perf_enable_all(void)
{
156 157 158
	if (unlikely(!perf_counters_initialized))
		return;

159
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask);
I
Ingo Molnar 已提交
160 161
}

162
u64 hw_perf_save_disable(void)
163 164 165
{
	u64 ctrl;

166 167 168
	if (unlikely(!perf_counters_initialized))
		return 0;

169
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
170
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
171

172
	return ctrl;
I
Ingo Molnar 已提交
173
}
174
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
I
Ingo Molnar 已提交
175

176 177
void hw_perf_restore(u64 ctrl)
{
178 179 180
	if (unlikely(!perf_counters_initialized))
		return;

181
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
182 183 184
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

185
static inline void
186
__pmc_generic_disable(struct perf_counter *counter,
187
			   struct hw_perf_counter *hwc, unsigned int idx)
188
{
189 190 191
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
192 193
}

194
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);
I
Ingo Molnar 已提交
195

196 197 198 199 200 201 202
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
I
Ingo Molnar 已提交
203
{
204 205 206 207 208 209 210 211 212 213 214 215 216 217 218
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	/*
	 * If we are way outside a reasoable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}
I
Ingo Molnar 已提交
219

220 221 222 223 224 225 226 227 228
	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
229 230
}

231
static void
232
__pmc_generic_enable(struct perf_counter *counter,
233
			  struct hw_perf_counter *hwc, int idx)
234 235 236
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
I
Ingo Molnar 已提交
237 238
}

239 240 241 242 243
static int fixed_mode_idx(struct hw_perf_counter *hwc)
{
	return -1;
}

244 245 246
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
247
static void pmc_generic_enable(struct perf_counter *counter)
I
Ingo Molnar 已提交
248 249 250 251 252 253 254
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
255
		idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
I
Ingo Molnar 已提交
256 257 258 259 260 261
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

262
	__pmc_generic_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
263

264
	cpuc->counters[idx] = counter;
265

266
	__hw_perf_counter_set_period(counter, hwc, idx);
267
	__pmc_generic_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
268 269 270 271
}

void perf_counter_print_debug(void)
{
272
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
273 274
	int cpu, idx;

275
	if (!nr_counters_generic)
276
		return;
I
Ingo Molnar 已提交
277 278 279 280 281

	local_irq_disable();

	cpu = smp_processor_id();

282 283 284
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
I
Ingo Molnar 已提交
285 286 287 288 289 290

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status:     %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow:   %016llx\n", cpu, overflow);

291
	for (idx = 0; idx < nr_counters_generic; idx++) {
292 293
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0  + idx, pmc_count);
I
Ingo Molnar 已提交
294

295
		prev_left = per_cpu(prev_left[idx], cpu);
I
Ingo Molnar 已提交
296 297 298 299 300

		printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
301 302
		printk(KERN_INFO "CPU#%d: PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
I
Ingo Molnar 已提交
303 304 305 306
	}
	local_irq_enable();
}

307
static void pmc_generic_disable(struct perf_counter *counter)
I
Ingo Molnar 已提交
308 309 310 311 312
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

313
	__pmc_generic_disable(counter, hwc, idx);
I
Ingo Molnar 已提交
314 315

	clear_bit(idx, cpuc->used);
316
	cpuc->counters[idx] = NULL;
I
Ingo Molnar 已提交
317

318 319 320 321 322
	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
I
Ingo Molnar 已提交
323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

339
/*
340 341
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
342
 */
I
Ingo Molnar 已提交
343 344 345 346
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
347
	u64 pmc_ctrl;
I
Ingo Molnar 已提交
348

349
	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
I
Ingo Molnar 已提交
350

351 352
	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);
353 354

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
355
		__pmc_generic_enable(counter, hwc, idx);
I
Ingo Molnar 已提交
356 357 358
}

static void
359
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
I
Ingo Molnar 已提交
360
{
361
	struct perf_counter *counter, *group_leader = sibling->group_leader;
I
Ingo Molnar 已提交
362

363
	/*
364
	 * Store sibling timestamps (if any):
365 366
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
367
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
368
		perf_store_irq_data(sibling, counter->hw_event.type);
369
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
I
Ingo Molnar 已提交
370 371 372 373 374 375 376 377 378 379
	}
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
380
	u64 ack, status, saved_global;
I
Ingo Molnar 已提交
381
	struct cpu_hw_counters *cpuc;
382 383

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
I
Ingo Molnar 已提交
384 385

	/* Disable counters globally */
386
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
I
Ingo Molnar 已提交
387 388 389 390
	ack_APIC_irq();

	cpuc = &per_cpu(cpu_hw_counters, cpu);

391 392 393 394
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

I
Ingo Molnar 已提交
395 396
again:
	ack = status;
397 398
	for_each_bit(bit, (unsigned long *) &status, nr_counters_generic) {
		struct perf_counter *counter = cpuc->counters[bit];
I
Ingo Molnar 已提交
399 400 401 402 403 404 405

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

I
Ingo Molnar 已提交
406
		switch (counter->hw_event.record_type) {
I
Ingo Molnar 已提交
407 408 409 410 411 412 413 414 415 416 417
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
418
		 * do a task wakeup - but we mark these generic as
I
Ingo Molnar 已提交
419 420 421 422 423 424 425 426 427 428
		 * wakeup_pending and initate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

429
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
I
Ingo Molnar 已提交
430 431 432 433 434 435 436

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
437
out:
I
Ingo Molnar 已提交
438
	/*
439
	 * Restore - do not reenable when global enable is off:
I
Ingo Molnar 已提交
440
	 */
441
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
I
Ingo Molnar 已提交
442 443 444 445 446
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
447
	inc_irq_stat(apic_perf_irqs);
I
Ingo Molnar 已提交
448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

467 468
	for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];
I
Ingo Molnar 已提交
469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526

		if (!counter)
			continue;

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int ebx;
527 528
	unsigned int unused;
	union cpuid10_edx edx;
I
Ingo Molnar 已提交
529 530 531 532 533 534 535 536

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
537
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
I
Ingo Molnar 已提交
538 539 540 541 542
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

543 544
	printk(KERN_INFO "... version:         %d\n", eax.split.version_id);
	printk(KERN_INFO "... num counters:    %d\n", eax.split.num_counters);
545 546 547
	nr_counters_generic = eax.split.num_counters;
	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
		nr_counters_generic = X86_PMC_MAX_GENERIC;
I
Ingo Molnar 已提交
548
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
549
			nr_counters_generic, X86_PMC_MAX_GENERIC);
I
Ingo Molnar 已提交
550
	}
551 552
	perf_counter_mask = (1 << nr_counters_generic) - 1;
	perf_max_counters = nr_counters_generic;
I
Ingo Molnar 已提交
553

554 555 556
	printk(KERN_INFO "... bit width:       %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask length:     %d\n", eax.split.mask_length);

557 558 559
	nr_counters_fixed = edx.split.num_counters_fixed;
	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_counters_fixed = X86_PMC_MAX_FIXED;
560
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
561
			nr_counters_fixed, X86_PMC_MAX_FIXED);
562
	}
563 564 565
	printk(KERN_INFO "... fixed counters:  %d\n", nr_counters_fixed);

	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
I
Ingo Molnar 已提交
566

567
	printk(KERN_INFO "... counter mask:    %016Lx\n", perf_counter_mask);
568 569
	perf_counters_initialized = true;

I
Ingo Molnar 已提交
570 571 572
	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
I
Ingo Molnar 已提交
573

574
static void pmc_generic_read(struct perf_counter *counter)
575 576 577 578
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

579
static const struct hw_perf_counter_ops x86_perf_counter_ops = {
580 581 582
	.hw_perf_counter_enable		= pmc_generic_enable,
	.hw_perf_counter_disable	= pmc_generic_disable,
	.hw_perf_counter_read		= pmc_generic_read,
I
Ingo Molnar 已提交
583 584
};

585 586
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
I
Ingo Molnar 已提交
587 588 589 590 591 592 593 594 595
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}