/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space
 *
 * Clone of core_kernel_text() from kernel/extable.c, but doesn't call
 * init_kernel_text(), because ftrace doesn't trace functions in init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
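
/*
 * Worked example (hypothetical target address, for illustration only): for a
 * target of 0x80123458, INSN_JAL() drops the two low bits and keeps the low
 * 26 bits of the word index, so the encoded instruction is
 * 0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff) == 0x0c048d16.  Since jal
 * carries only 26 bits of target, it can reach only addresses within the
 * current 256 MB segment, which is why JUMP_RANGE_MASK ((1 << 28) - 1) is
 * applied to the uasm-generated jumps below.
 */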

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;
	flush_icache_range(ip, ip + 8); /* original ip + 12 */
	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
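
/*
 * For example, with MCOUNT_OFFSET_INSNS == 4 this evaluates to 0x10000004,
 * i.e. "beq zero, zero, +4": an unconditional branch whose 16-bit offset
 * field is 4, so it skips the four remaining instructions of the module
 * call site shown above (offset 5 / 0x10000005 in the
 * KBUILD_MCOUNT_RA_ADDRESS + CONFIG_32BIT case).
 */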

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is needed.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
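
/*
 * For example (hypothetical offsets): "sw ra, 28(sp)" assembles to
 * 0xafbf001c and "sd ra, 56(sp)" to 0xffbf0038.  Both satisfy
 * (code & S_RA_SP) == S_RA_SP because the check below is a mask test, not
 * an equality test, so the 32-bit and 64-bit stores are matched alike; the
 * low 16 bits (code & OFFSET_MASK) then give ra's offset from sp.
 */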

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip from the return address to after the
	 * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
	 * kernel, move to after the instruction "move ra, at" (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search backwards through the text until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the next instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; but a non-leaf function saves the return
	 * address in its own stack space, so we cannot hijack it directly
	 * and need to find the real stack address, which
	 * ftrace_get_parent_ra_addr() does for us.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * in $12 for us, and for a leaf function only a zero is put in $12;
	 * we do this in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[(nr - __NR_N32_Linux) * 2];
#endif
	if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */