/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
 
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

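/* the per-CPU sample buffers; each is drained into the event buffer by wq_sync_buffer() */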
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

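/* how often each CPU buffer is flushed, and whether the flush work may re-arm itself */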
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

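/* free the vmalloc'ed sample buffer of each online CPU */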
void free_cpu_buffers(void)
{
	int i;
 
	for_each_online_cpu(i)
		vfree(per_cpu(cpu_buffer, i).buffer);
}

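/* allocate and initialise a sample buffer for each online CPU */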
int alloc_cpu_buffers(void)
{
	int i;
 
	unsigned long buffer_size = fs_cpu_buffer_size;
 
	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
			cpu_to_node(i));
		if (!b->buffer)
			goto fail;
 
		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

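/* start the periodic work that syncs each CPU buffer into the event buffer */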
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

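/* stop the periodic syncing and wait for any pending work to complete */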
void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
	/* reset these to invalid values; the next sample
	 * collected will repopulate them with the correct
	 * task and kernel/user state
	 */
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}

static void increment_head(struct oprofile_cpu_buffer * b)
{
	unsigned long new_head = b->head_pos + 1;

	/* Ensure anything written to the slot before we
	 * increment is visible */
	wmb();

	if (new_head < b->buffer_size)
		b->head_pos = new_head;
	else
		b->head_pos = 0;
}

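/* store one pc/event pair in the current head slot and advance the head */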
static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
           unsigned long pc, unsigned long event)
{
	struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
	entry->eip = pc;
	entry->event = event;
	increment_head(cpu_buf);
}

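/* store an escape entry: eip is ESCAPE_CODE and the event field carries the code value */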
static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
	add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct * task;

	cpu_buf->sample_received++;

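	/* a real pc equal to ESCAPE_CODE would be indistinguishable from an
	 * escape entry, so count it as invalid and drop the sample */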
	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}
 
	add_sample(cpu_buf, pc, event);
	return 1;
}

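/* mark the start of a backtrace: emit CPU_TRACE_BEGIN and enter tracing mode */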
static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
	cpu_buf->tracing = 0;
}

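/* log a sample with an explicit kernel/user flag and, if a backtrace depth is set,
 * follow it with a backtrace */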
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
				unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/* if log_sample() fails we can't backtrace since we lost the source
	 * of this event */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

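/* log a sample for the interrupted context, taking pc and kernel/user mode from the registers */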
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

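/* log a sample when the caller already knows the pc and kernel/user mode */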
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

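/* append one return address to a backtrace started by oprofile_begin_trace() */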
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* a broken frame can give an eip with the same value as an escape code;
	 * abort the trace if we get one */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer * b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk("WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}