/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26 | 25...0 */

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
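
/*
 * Example: INSN_JAL(0x80123400) encodes to 0x0c048d00. At run time the
 * CPU rebuilds the top four address bits from the PC of the delay slot,
 * so a jal can only reach targets within the same 256 MB segment.
 */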

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}

/*
 * Check if the address is in kernel space
 *
 * This clones core_kernel_text() from kernel/extable.c, but does not
 * call init_kernel_text(), since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *                                  1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *                                  1: offset = 4 instructions
 */

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
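
/*
 * 0x10000000 is "beq zero, zero, offset", i.e. an unconditional branch;
 * the 16-bit offset is counted in instructions from the delay slot, so
 * INSN_B_1F (e.g. 0x10000005) branches over the rest of the mcount
 * calling sequence shown above.
 */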

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is required.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
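
/*
 * ftrace_call labels the patchable call inside ftrace_caller (defined
 * in mcount.S); ftrace_update_ftrace_func() below repoints it at the
 * current tracer.
 */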

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))
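
/*
 * ftrace_graph_call labels a patch site in ftrace_caller (mcount.S);
 * the two helpers below toggle it between a nop and a jump to
 * ftrace_graph_caller.
 */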

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
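
/*
 * 0xafbf0000 is "sw ra, 0(sp)". The masked comparisons below also match
 * the 64-bit "sd" form, because the sd opcode (0x3f) is a bit-superset
 * of the sw opcode (0x2b) and the tests only check that the S_*_SP bits
 * are set.
 */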

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip from the return address after the
	 * "lui v1, hi_16bit_of_mcount" instruction (offset is 24); for the
	 * kernel, move it after the "move at, ra" instruction (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a
	 * non-store instruction or an "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move back to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; a non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly, but must
	 * first find the real stack address, which is what
	 * ftrace_get_parent_ra_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * in $12 for us, while for a leaf function a zero is put in $12;
	 * this is done in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function
	 * interface. The calling site is two instructions before the
	 * return address in kernel space ("move at, ra" + "jal"), and
	 * MCOUNT_OFFSET_INSNS + 1 instructions before it in modules.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */