Commit 26f45274 authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steve Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -1148,7 +1148,6 @@ ENTRY(ftrace_regs_caller)
* ip location, and move flags into the return ip location.
*/
pushl 4(%esp) /* save return ip into ip slot */
subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
@@ -1169,6 +1168,7 @@ ENTRY(ftrace_regs_caller)
movl $__KERNEL_CS,13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
@@ -1180,7 +1180,6 @@ GLOBAL(ftrace_regs_call)
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */
addl $MCOUNT_INSN_SIZE, %eax
movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx
......
@@ -165,6 +165,10 @@ GLOBAL(ftrace_regs_call)
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)
/* Handlers can change the RIP */
movq RIP(%rsp), %rax
movq %rax, SS+8(%rsp)
/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
......
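The 32-bit and 64-bit ftrace_regs_caller hunks above keep the saved regs->ip pointing at the probed call site (the MCOUNT_INSN_SIZE adjustment now happens when the first parameter is loaded) and copy any RIP a handler wrote back into the return slot. The practical effect is that a callback registered with FTRACE_OPS_FL_SAVE_REGS receives a usable pt_regs and may redirect execution. A minimal sketch of such a callback follows; it is not part of this commit, and the "my_*" names and the do_fork filter target are illustrative assumptions:

/*
 * Hedged sketch (not from this commit): a regs-saving ftrace callback.
 * The callback signature and the FTRACE_OPS_FL_SAVE_REGS flag match what
 * kprobe_ftrace_handler relies on above.
 */
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/ptrace.h>
#include <linux/string.h>

static void notrace my_regs_callback(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *ops, struct pt_regs *regs)
{
	/* regs->ip holds the probed call site; handlers may rewrite it */
	pr_info_ratelimited("hit %pS called from %pS\n",
			    (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_regs_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static char my_filter[] = "do_fork";	/* assumed probe target */

static int __init my_init(void)
{
	int ret = ftrace_set_filter(&my_ops, my_filter, strlen(my_filter), 0);

	return ret ? ret : register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

ftrace_set_filter() restricts the callback to the named functions; without it the callback would run for every traced function in the kernel.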
@@ -541,6 +541,8 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
return 1;
}
static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb);
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -599,6 +601,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
} else if (kprobe_running()) {
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
#ifdef KPROBES_CAN_USE_FTRACE
if (kprobe_ftrace(p)) {
skip_singlestep(p, regs, kcb);
return 1;
}
#endif
setup_singlestep(p, regs, kcb, 0);
return 1;
}
@@ -1053,6 +1061,21 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
#ifdef KPROBES_CAN_USE_FTRACE
static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
/*
* Emulate singlestep (and also recover regs->ip)
* as if there is a 5byte nop
*/
regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
__this_cpu_write(current_kprobe, NULL);
}
/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
@@ -1072,21 +1095,17 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
regs->ip += sizeof(kprobe_opcode_t);
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler)
p->pre_handler(p, regs);
if (unlikely(p->post_handler)) {
/* Emulate singlestep as if there is a 5byte nop */
regs->ip = ip + MCOUNT_INSN_SIZE;
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
__this_cpu_write(current_kprobe, NULL);
regs->ip = ip; /* Recover for next callback */
if (!p->pre_handler || !p->pre_handler(p, regs))
skip_singlestep(p, regs, kcb);
/*
* If pre_handler returns !0, it sets regs->ip and
* resets current kprobe.
*/
}
end:
local_irq_restore(flags);
......
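With skip_singlestep() above, a kprobe sitting on an ftrace-managed function entry is completed entirely inside kprobe_ftrace_handler(): the pre handler sees regs->ip advanced past the emulated breakpoint, and instead of single-stepping the probed instruction the handler pretends the 5-byte call was a nop before running the post handler. Registration is unchanged from a user's point of view; a hedged sketch using the standard register_kprobe() interface follows (the "my_*" names and the do_fork target are assumptions, not taken from the commit):

/*
 * Hedged sketch (not from this commit): a kprobe on a function entry that
 * will be serviced through kprobe_ftrace_handler() rather than an int3.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: %pS hit, ip=%lx\n", p->addr, regs->ip);
	return 0;	/* 0 lets the (emulated) single-step proceed */
}

static void my_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	pr_info("post: after %pS\n", p->addr);
}

static struct kprobe my_kp = {
	.symbol_name	= "do_fork",	/* function entry, i.e. the ftrace site */
	.pre_handler	= my_pre,
	.post_handler	= my_post,
};

static int __init my_kp_init(void)
{
	return register_kprobe(&my_kp);
}

static void __exit my_kp_exit(void)
{
	unregister_kprobe(&my_kp);
}

module_init(my_kp_init);
module_exit(my_kp_exit);
MODULE_LICENSE("GPL");

With CONFIG_FUNCTION_TRACER and KPROBES_CAN_USE_FTRACE enabled, a probe whose address matches the function's ftrace call site is flagged KPROBE_FLAG_FTRACE by check_kprobe_address_safe() (see the hunk below) and dispatched through kprobe_ftrace_handler().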
@@ -98,7 +98,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_TRACE_CLOCK) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
......
@@ -1418,9 +1418,6 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
/* Given address is not on the instruction boundary */
if ((unsigned long)p->addr != ftrace_addr)
return -EILSEQ;
/* break_handler (jprobe) can not work with ftrace */
if (p->break_handler)
return -EINVAL;
p->flags |= KPROBE_FLAG_FTRACE;
#else /* !KPROBES_CAN_USE_FTRACE */
return -EINVAL;
......
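The removed check had rejected any kprobe with a break_handler (i.e. a jprobe) at an ftrace address; together with the new KPROBES_CAN_USE_FTRACE branch in kprobe_handler() that finishes such probes via skip_singlestep(), jprobes on function entries work again. A hedged sketch of the classic jprobe pattern this re-enables, with the handler mirroring the do_fork() prototype of kernels from this era (names and target are illustrative, not from the commit):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/sched.h>

/*
 * The entry handler must mirror the probed function's signature and must
 * end with jprobe_return().
 */
static long my_jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			struct pt_regs *regs, unsigned long stack_size,
			int __user *parent_tidptr, int __user *child_tidptr)
{
	pr_info("do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* hands control back to the real do_fork */
	return 0;		/* never reached */
}

static struct jprobe my_jp = {
	.entry			= my_jdo_fork,
	.kp.symbol_name		= "do_fork",
};

static int __init my_jp_init(void)
{
	return register_jprobe(&my_jp);
}

static void __exit my_jp_exit(void)
{
	unregister_jprobe(&my_jp);
}

module_init(my_jp_init);
module_exit(my_jp_exit);
MODULE_LICENSE("GPL");

jprobe_return() must be the last thing the entry handler does; it restores the saved context and resumes the real function.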
@@ -62,8 +62,12 @@ config HAVE_C_RECORDMCOUNT
config TRACER_MAX_TRACE
bool
config TRACE_CLOCK
bool
config RING_BUFFER
bool
select TRACE_CLOCK
config FTRACE_NMI_ENTER
bool
@@ -114,6 +118,7 @@ config TRACING
select NOP_TRACER
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
config GENERIC_TRACER
bool
......
@@ -19,11 +19,7 @@ endif
CFLAGS_trace_events_filter.o := -I$(src)
#
# Make the trace clocks available generally: it's infrastructure
# relied on by ptrace for example:
#
obj-y += trace_clock.o
obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o
obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
......
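trace_clock.o is no longer built unconditionally: code that needs the trace clocks must now enable CONFIG_TRACE_CLOCK, which RING_BUFFER and TRACING select in the Kconfig hunk above and which kernel/Makefile uses to descend into the trace/ directory. For callers that just want a timestamp, the cpu-local variant looks roughly like this (a small sketch, assuming the trace_clock_local() declaration from <linux/trace_clock.h>; my_measure() is an illustrative name):

#include <linux/kernel.h>
#include <linux/trace_clock.h>

/*
 * Hedged sketch: timestamp a region with the fast, cpu-local trace clock
 * that CONFIG_TRACE_CLOCK now gates.
 */
static u64 my_measure(void (*work)(void))
{
	u64 start = trace_clock_local();

	work();
	return trace_clock_local() - start;	/* nanoseconds on this CPU */
}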
@@ -1646,9 +1646,11 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
if (WARN_ON_ONCE(ret))
if (WARN_ON_ONCE(ret)) {
pr_warning("error disabling system %s\n",
system->name);
continue;
}
pr_cont("OK\n");
}
......