#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

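/*
 * Translate a generic perf hardware event id into the P6 event-select
 * encoding above (unit mask in bits 15:8, event code in bits 7:0).
 */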
static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * An event setting that is specified not to count anything.
 * We use it to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

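/*
 * The second argument to INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * counters the event may be scheduled on: 0x1 allows only PERFCTR0,
 * 0x2 only PERFCTR1.
 */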
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

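/*
 * On P6 the enable bit in PERFEVTSEL0 gates both counters at once, so
 * disabling/enabling the whole PMU only needs to touch MSR_P6_EVNTSEL0.
 */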
static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void p6_pmu_enable_all(int added)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

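/*
 * A single event cannot be stopped through the enable bit without also
 * stopping the other counter (the bit is global), so we "disable" it by
 * reprogramming its selector with the non-counting P6_NOP_EVENT.
 */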
static inline void
p6_pmu_disable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

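/*
 * Restore the event's own selector value; the enable bit is set again
 * only if the PMU as a whole is currently enabled (cpuc->enabled).
 */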
static void p6_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

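/*
 * PMU description for P6-family cores: two generic counters
 * (PERFCTR0/1) programmed through EVNTSEL0/1, 32 bits of effective
 * counter width, and no architectural perfmon (version 0).
 */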
static __initconst const struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_counters		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of an event for a P6-like PMU is 32 bits only.
	 *
	 * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
	 */
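	/*
	 * For example, a raw 40-bit reading of 0xFFFFFFFFFF is just the
	 * sign-extended 32-bit value -1; cntval_mask below strips the
	 * redundant upper bits.
	 */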
	.cntval_bits		= 32,
	.cntval_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};

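/*
 * Probe the boot CPU and install the P6 PMU description; in this era of
 * the code it is reached from the core Intel PMU setup for family-6
 * CPUs that lack architectural perfmon (no CPUID leaf 0xA).
 */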
static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:  /* Pentium Pro */
	case 3:
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13: /* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */