/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

23
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
24
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
25 26 27 28

static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
29 30
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
31 32
}

33 34 35 36 37 38
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


39
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
40 41 42 43
				  struct pt_regs *regs)
{
}

44
__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
				struct pt_regs *regs)
{
}

/*
 * RCU callback: free each CPU's entry buffer, then the container that
 * holds the pointer array.
 */
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *e =
		container_of(head, struct callchain_cpus_entries, rcu_head);
	int i;

	for_each_possible_cpu(i)
		kfree(e->cpu_entries[i]);
	kfree(e);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
67
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

88
	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;
89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

109
int get_callchain_buffers(int event_max_stack)
110 111 112 113 114 115 116 117 118 119 120 121
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

122 123 124 125 126 127 128 129 130 131 132 133
	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

134 135
	if (count == 1)
		err = alloc_callchain_buffers();
136
exit:
137 138
	if (err)
		atomic_dec(&nr_callchain_events);
139

140 141
	mutex_unlock(&callchain_mutex);

142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157
	return err;
}

/*
 * Drop a reference taken by get_callchain_buffers(); the last user
 * releases the per-CPU buffers (actual freeing is RCU-deferred).
 */
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

158
	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
159 160 161 162 163 164 165 166 167
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

168 169
	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
170 171 172 173 174
}

static void
put_callchain_entry(int rctx)
{
175
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
176 177
}

178 179
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
180
		   u32 max_stack, bool crosstask, bool add_mark)
181 182
{
	struct perf_callchain_entry *entry;
183
	struct perf_callchain_entry_ctx ctx;
184 185
	int rctx;

186 187 188 189 190 191 192
	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

193 194
	ctx.entry     = entry;
	ctx.max_stack = max_stack;
195
	ctx.nr	      = entry->nr = init_nr;
196 197
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;
198

199
	if (kernel && !user_mode(regs)) {
200
		if (add_mark)
201
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
202
		perf_callchain_kernel(&ctx, regs);
203 204
	}

205 206 207 208 209 210 211 212 213
	if (user) {
		if (!user_mode(regs)) {
			if  (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
214 215
			mm_segment_t fs;

216
			if (crosstask)
217 218
				goto exit_put;

219
			if (add_mark)
220
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
221 222 223

			fs = get_fs();
			set_fs(USER_DS);
224
			perf_callchain_user(&ctx, regs);
225
			set_fs(fs);
226
		}
227 228 229 230 231 232 233
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
234

235 236 237 238
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
239 240 241
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
242 243
	int *value = table->data;
	int new_value = *value, ret;
244 245 246 247 248 249 250 251 252 253 254
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
255
		*value = new_value;
256 257 258 259 260

	mutex_unlock(&callchain_mutex);

	return ret;
}