#include <linux/perf_event.h>
#include <linux/types.h>

#include "perf_event.h"

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,	/* CPU_CLK_UNHALTED	 */
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,	/* INST_RETIRED		 */
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,	/* L2_RQSTS:M:E:S:I	 */
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,	/* L2_RQSTS:I		 */
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,	/* BR_INST_RETIRED	 */
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,	/* BR_MISS_PRED_RETIRED	 */
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,	/* BUS_DRDY_CLOCKS	 */
};

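/*
 * Translate a generic PERF_COUNT_HW_* index into the raw P6 event
 * select encoding (event code in bits 0-7, unit mask in bits 8-15).
 */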
static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * An event encoding that is documented not to count anything:
 * L2_RQSTS (0x2e) with an empty MESI unit mask. We use it to
 * effectively disable a counter.
 */
#define P6_NOP_EVENT			0x0000002EULL

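/*
 * P6 floating-point events are tied to a specific counter:
 * INTEL_EVENT_CONSTRAINT(event, mask), where the mask is a bitmap of
 * the counters the event may be scheduled on (bit 0 = PERFCTR0,
 * bit 1 = PERFCTR1).
 */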
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

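/*
 * On P6 the enable bit in EVNTSEL0 gates both counters, so clearing
 * it here stops the whole PMU.
 */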
static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

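/*
 * Re-enable the PMU by setting the shared enable bit in EVNTSEL0.
 */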
static void p6_pmu_enable_all(int added)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

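/*
 * Disable a single event by reprogramming its counter with the
 * non-counting P6_NOP_EVENT; the shared enable bit is preserved so
 * the other counter keeps running.
 */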
static inline void
p6_pmu_disable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)wrmsrl_safe(hwc->config_base, val);
}

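/*
 * Program the event's configuration into its EVNTSEL MSR, setting the
 * shared enable bit only if the PMU is globally enabled.
 */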
static void p6_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)wrmsrl_safe(hwc->config_base, val);
}

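/*
 * Raw config field layout exported through sysfs
 * (/sys/bus/event_source/devices/cpu/format/) so that tools can
 * compose raw events symbolically.
 */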
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_p6_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

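/*
 * PMU description for the P6 family: two generic counters with an
 * effective 32-bit width, driven through the common x86 perf code.
 */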
static __initconst const struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_counters		= 2,
	/*
	 * Events have 40 bits implemented. However, they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such, the
	 * effective width of an event for a P6-like PMU is only 32 bits.
	 *
	 * See the IA-32 Intel Architecture Software Developer's Manual,
	 * Vol. 3B.
	 */
	.cntval_bits		= 32,
	.cntval_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,

	.format_attrs		= intel_p6_formats_attr,
};

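/*
 * Probe for a supported P6-family model (Pentium Pro through
 * Pentium M) and install the PMU description above.
 */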
__init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13:
		/* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}