#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H
#ifdef __ASSEMBLY__

	/* skip is set if the stack was already partially adjusted */
	.macro MCOUNT_SAVE_FRAME skip=0
	 /*
	  * We add enough stack to save all regs.
	  */
	subq $(SS+8-\skip), %rsp
	/*
	 * Save the caller-clobbered registers that may hold the traced
	 * function's arguments and return value.
	 */
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	 /* Move RIP to its proper location */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm

	/* \skip must match the value passed to MCOUNT_SAVE_FRAME */
	.macro MCOUNT_RESTORE_FRAME skip=0
	/* Restore the registers in the reverse order they were saved. */
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	/* Undo the stack adjustment made by MCOUNT_SAVE_FRAME. */
	addq $(SS+8-\skip), %rsp
	.endm

#endif

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
/* Compiler was built with -mfentry: it calls __fentry__ at function entry. */
# define MCOUNT_ADDR		((long)(__fentry__))
#else
/* Traditional -pg instrumentation: the compiler emits calls to mcount(). */
# define MCOUNT_ADDR		((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

/*
 * Translate the address recorded at build time into the address ftrace
 * should patch.  On x86 no adjustment is needed: addr is already the
 * address of the mcount/fentry call instruction, and recordmcount does
 * the necessary offset calculation.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch-private per-record state for dynamic ftrace; x86 keeps none. */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

int ftrace_int3_handler(struct pt_regs *regs);

#endif /*  CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#endif /* _ASM_X86_FTRACE_H */