/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>

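/*
 * An x86 call instruction is five bytes: the 0xe8 opcode followed by
 * a 32-bit displacement. Callers hand us the address immediately
 * after the call, so we step back this many bytes to reach its start.
 */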
#define CALL_BACK		5

/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;

/*
 * The five bytes of the call instruction, viewed either as raw bytes
 * or as the 0xe8 opcode plus a packed 32-bit displacement.
 */
union ftrace_code_union {
	char code[5];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

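/*
 * Return whether the call site at @ip - CALL_BACK has already been
 * converted to our nop. Only the first sizeof(long) bytes of the
 * five-byte site are compared, which is enough to tell the nop
 * apart from the 0xe8 call opcode.
 */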
notrace int ftrace_ip_converted(unsigned long ip)
{
	unsigned long save;

	ip -= CALL_BACK;
	save = *(long *)ip;

	return save == *ftrace_nop;
}

static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

notrace unsigned char *ftrace_nop_replace(void)
{
	return (unsigned char *)ftrace_nop;
}

notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

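	/*
	 * 0xe8 is the opcode of the near call; its displacement is
	 * relative to the next instruction, which is exactly the
	 * address passed in as @ip (hence no CALL_BACK adjustment).
	 */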
	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

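/*
 * Replace the five bytes at @ip - CALL_BACK with new_code, but only
 * if they currently contain old_code: the first four bytes go in
 * atomically via cmpxchg, and the fifth is written only when that
 * succeeds. Returns 0 on success (or if the site already held the
 * new code), 1 if the access faulted, and 2 if the site contained
 * neither the old nor the new code.
 */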
notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code; /* 4 bytes */
	unsigned new = *(unsigned *)new_code; /* 4 bytes */
	unsigned char newch = new_code[4];
	int faulted = 0;

	/* move the IP back to the start of the call */
	ip -= CALL_BACK;

	/*
	 * Note: Due to modules and __init, code can
	 *  disappear and change, we need to protect against faulting
	 *  as well as code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
	asm volatile (
		"1: lock\n"
		"   cmpxchg %3, (%2)\n"
		"   jnz 2f\n"
		"   movb %b4, 4(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	movl $1, %0\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "r"(newch),
		  "0"(faulted), "a"(old)
		: "memory");
	sync_core();

	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}

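/*
 * find_nop_table() returns the nops preferred for this CPU, indexed
 * by length; entry CALL_BACK (5) is the five-byte nop used to patch
 * out the call to mcount.
 */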
int __init ftrace_dyn_arch_init(void)
{
	const unsigned char *const *noptable = find_nop_table();

	ftrace_nop = (long *)noptable[CALL_BACK];

	return 0;
}