perf_event.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x
 *
 *  Copyright IBM Corp. 2012, 2013
 *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>

const char *perf_pmu_name(void)
{
	if (cpum_cf_avail() || cpum_sf_avail())
		return "CPU-Measurement Facilities (CPU-MF)";
	return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);

int perf_num_counters(void)
{
	int num = 0;

	if (cpum_cf_avail())
		num += PERF_CPUM_CF_MAX_CTR;
	if (cpum_sf_avail())
		num += PERF_CPUM_SF_MAX_CTR;

	return num;
}
EXPORT_SYMBOL(perf_num_counters);

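/*
 * KVM guest attribution helpers: the SIE entry path is assumed to save the
 * SIE control block pointer in the empty1[] slot of its stack frame, so
 * sie_block() can recover it through the GPR 15 back chain.  is_in_guest()
 * treats a kernel sample whose PSW address equals &sie_exit as one that was
 * taken while the CPU was running a guest under SIE.
 */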
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *) stack->empty1[0];
}

static bool is_in_guest(struct pt_regs *regs)
{
	if (user_mode(regs))
		return false;
#if IS_ENABLED(CONFIG_KVM)
	return instruction_pointer(regs) == (unsigned long) &sie_exit;
#else
	return false;
#endif
}

static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}

static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr;
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}

static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}

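/*
 * Samples generated by the sampling facility carry their own context: the
 * cpum_sf PMU places a struct perf_sf_sde_regs overlay in regs->int_parm_long,
 * whose in_guest bit selects between guest and host flags, while user vs.
 * kernel state is still derived from the problem-state bit via user_mode().
 */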
static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
{
	struct perf_sf_sde_regs *sde_regs;
	unsigned long flags;

	sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
	if (sde_regs->in_guest)
		flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
	else
		flags = user_mode(regs) ? PERF_RECORD_MISC_USER
					: PERF_RECORD_MISC_KERNEL;
	return flags;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	/* Check if the cpum_sf PMU has created the pt_regs structure.
	 * In this case, perf misc flags can be easily extracted.  Otherwise,
	 * do regular checks on the pt_regs content.
	 */
	if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
		if (!regs->gprs[15])
			return perf_misc_flags_sf(regs);

	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}

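/* Query (qctri) and print the counter facility state of this CPU. */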
static void print_debug_cf(void)
{
	struct cpumf_ctr_info cf_info;
	int cpu = smp_processor_id();

	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info))
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
}

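/* Query (qsi) and print the sampling facility state of this CPU. */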
static void print_debug_sf(void)
{
	struct hws_qsi_info_block si;
	int cpu = smp_processor_id();

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
		cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
		si.cpu_speed);

	if (si.as)
		pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
			" bsdes=%i tear=%016lx dear=%016lx\n", cpu,
			si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
	if (si.ad)
		pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
			" dsdes=%i tear=%016lx dear=%016lx\n", cpu,
			si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
}

void perf_event_print_debug(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (cpum_cf_avail())
		print_debug_cf();
	if (cpum_sf_avail())
		print_debug_sf();
	local_irq_restore(flags);
}

/*
 * Service level infrastructure: the seq_print callback below is registered
 * with register_service_level() and adds CPU-MF details to the service level
 * output (/proc/service_levels on s390).
 */
static void sl_print_counter(struct seq_file *m)
{
	struct cpumf_ctr_info ci;

	memset(&ci, 0, sizeof(ci));
	if (qctri(&ci))
		return;

	seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
		   "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
}

static void sl_print_sampling(struct seq_file *m)
{
	struct hws_qsi_info_block si;

	memset(&si, 0, sizeof(si));
	if (qsi(&si))
		return;

	if (!si.as && !si.ad)
		return;

	seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
		   " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
		   si.cpu_speed);
	if (si.as)
		seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
			   " sample_size=%u\n", si.bsdes);
	if (si.ad)
		seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
			   " sample_size=%u\n", si.dsdes);
}

static void service_level_perf_print(struct seq_file *m,
				     struct service_level *sl)
{
	if (cpum_cf_avail())
		sl_print_counter(m);
	if (cpum_sf_avail())
		sl_print_sampling(m);
}

static struct service_level service_level_perf = {
	.seq_print = service_level_perf_print,
};

static int __init service_level_perf_register(void)
{
	return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);

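/* dump_trace() callback: record each kernel address in the callchain entry. */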
static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
{
	struct perf_callchain_entry_ctx *entry = data;

	perf_callchain_store(entry, address);
	return 0;
}

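/*
 * Walk the kernel stack, starting at the sampled GPR 15, and store the
 * return addresses as the kernel part of the callchain.
 */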
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	if (user_mode(regs))
		return;
	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
}

/* Perf definitions for PMU event attributes in sysfs */
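/*
 * ->show handler for the PMU event attributes; prints the event number in
 * the "event=0x..." syntax that the perf tool's event parser understands.
 */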
ssize_t cpumf_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}