Commit 3737a127 authored by Linus Torvalds

Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more perf updates from Ingo Molnar:
 "A second round of perf updates:

   - wide reaching kprobes sanitization and robustization, with the hope
     of fixing all 'probe this function crashes the kernel' bugs, by
     Masami Hiramatsu.

   - uprobes updates from Oleg Nesterov: tmpfs support, corner case
     fixes and robustization work.

   - perf tooling updates and fixes from Jiri Olsa, Namhyung Kim, Arnaldo
     et al:
        * Add support to accumulate hist periods (Namhyung Kim)
        * various fixes, refactorings and enhancements"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (101 commits)
  perf: Differentiate exec() and non-exec() comm events
  perf: Fix perf_event_comm() vs. exec() assumption
  uprobes/x86: Rename arch_uprobe->def to ->defparam, minor comment updates
  perf/documentation: Add description for conditional branch filter
  perf/x86: Add conditional branch filtering support
  perf/tool: Add conditional branch filter 'cond' to perf record
  perf: Add new conditional branch filter 'PERF_SAMPLE_BRANCH_COND'
  uprobes: Teach copy_insn() to support tmpfs
  uprobes: Shift ->readpage check from __copy_insn() to uprobe_register()
  perf/x86: Use common PMU interrupt disabled code
  perf/ARM: Use common PMU interrupt disabled code
  perf: Disable sampled events if no PMU interrupt
  perf: Fix use after free in perf_remove_from_context()
  perf tools: Fix 'make help' message error
  perf record: Fix poll return value propagation
  perf tools: Move elide bool into perf_hpp_fmt struct
  perf tools: Remove elide setup for SORT_MODE__MEMORY mode
  perf tools: Fix "==" into "=" in ui_browser__warning assignment
  perf tools: Allow overriding sysfs and proc finding with env var
  perf tools: Consider header files outside perf directory in tags target
  ...
......@@ -22,8 +22,9 @@ Appendix B: The kprobes sysctl interface
Kprobes enables you to dynamically break into any kernel routine and
collect debugging and performance information non-disruptively. You
can trap at almost any kernel code address, specifying a handler
can trap at almost any kernel code address(*), specifying a handler
routine to be invoked when the breakpoint is hit.
(*: some parts of the kernel code can not be trapped, see 1.5 Blacklist)
There are currently three types of probes: kprobes, jprobes, and
kretprobes (also called return probes). A kprobe can be inserted
......@@ -273,6 +274,19 @@ using one of the following techniques:
or
- Execute 'sysctl -w debug.kprobes_optimization=n'
1.5 Blacklist
Kprobes can probe most of the kernel except itself. This means
that there are some functions that kprobes cannot probe. Probing
(trapping) such functions can cause a recursive trap (e.g. double
fault) or the nested probe handler may never be called.
Kprobes manages such functions as a blacklist.
If you want to add a function into the blacklist, you just need
to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro
to specify a blacklisted function.
Kprobes checks the given probe address against the blacklist and
rejects registering it if the given address is in the blacklist.
2. Architectures Supported
Kprobes, jprobes, and return probes are implemented on the following
......
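As an illustration of the blacklist mechanism documented above (this example is not part of the diff, and the function name is made up), marking a kernel function as non-probeable looks roughly like this:

        #include <linux/kprobes.h>

        /* Hypothetical helper that must never be hit by a kprobe breakpoint,
         * e.g. because it runs in the int3/debug exception path. */
        static void my_fragile_helper(void)
        {
                /* ... */
        }
        /* Emits the address into the _kprobe_blacklist section, so that
         * registering a kprobe on this function is rejected. */
        NOKPROBE_SYMBOL(my_fragile_helper);

For assembly code, the series adds an _ASM_NOKPROBE() macro (see arch/x86/include/asm/asm.h below) that records an entry label in the same section.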
......@@ -410,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event)
*/
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
if (!is_sampling_event(event)) {
/*
* For non-sampling runs, limit the sample_period to half
* of the counter width. That way, the new counter value
......
......@@ -126,8 +126,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (irqs < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
return 0;
}
irq = platform_get_irq(pmu_device, 0);
......@@ -191,6 +191,10 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu->reset)
on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
/* If no interrupts available, set the corresponding capability flag */
if (!platform_get_irq(cpu_pmu->plat_device, 0))
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
/*
......
......@@ -57,6 +57,12 @@
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
......@@ -71,6 +77,7 @@
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
#endif /* _ASM_X86_ASM_H */
......@@ -116,4 +116,6 @@ struct kprobe_ctlblk {
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_int3_handler(struct pt_regs *regs);
extern int kprobe_debug_handler(struct pt_regs *regs);
#endif /* _ASM_X86_KPROBES_H */
......@@ -68,7 +68,7 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
dotraplinkage void do_stack_segment(struct pt_regs *, long);
#ifdef CONFIG_X86_64
dotraplinkage void do_double_fault(struct pt_regs *, long);
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *);
asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
#endif
dotraplinkage void do_general_protection(struct pt_regs *, long);
dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
......@@ -103,7 +103,6 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
void math_error(struct pt_regs *, int, int);
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
......
......@@ -41,18 +41,18 @@ struct arch_uprobe {
u8 ixol[MAX_UINSN_BYTES];
};
u16 fixups;
const struct uprobe_xol_ops *ops;
union {
#ifdef CONFIG_X86_64
unsigned long rip_rela_target_address;
#endif
struct {
s32 offs;
u8 ilen;
u8 opc1;
} branch;
} branch;
struct {
u8 fixups;
u8 ilen;
} defparam;
};
};
......
......@@ -5,7 +5,6 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
......@@ -551,7 +550,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
*
* Note: Must be called under text_mutex.
*/
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
void *text_poke(void *addr, const void *opcode, size_t len)
{
unsigned long flags;
char *vaddr;
......
......@@ -60,7 +60,7 @@ void arch_trigger_all_cpu_backtrace(void)
smp_mb__after_atomic();
}
static int __kprobes
static int
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
int cpu;
......@@ -80,6 +80,7 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
return NMI_DONE;
}
NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);
static int __init register_trigger_all_cpu_backtrace(void)
{
......
......@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
......@@ -1193,6 +1194,7 @@ int is_debug_stack(unsigned long addr)
(addr <= __get_cpu_var(debug_stack_addr) &&
addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);
DEFINE_PER_CPU(u32, debug_idt_ctr);
......@@ -1201,6 +1203,7 @@ void debug_stack_set_zero(void)
this_cpu_inc(debug_idt_ctr);
load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);
void debug_stack_reset(void)
{
......@@ -1209,6 +1212,7 @@ void debug_stack_reset(void)
if (this_cpu_dec_return(debug_idt_ctr) == 0)
load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);
#else /* CONFIG_X86_64 */
......
......@@ -303,15 +303,6 @@ int x86_setup_perfctr(struct perf_event *event)
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
} else {
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
* events (user-space has to fall back and
* sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
}
if (attr->type == PERF_TYPE_RAW)
......@@ -1293,7 +1284,7 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
u64 start_clock;
......@@ -1311,6 +1302,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
return ret;
}
NOKPROBE_SYMBOL(perf_event_nmi_handler);
struct event_constraint emptyconstraint;
struct event_constraint unconstrained;
......@@ -1366,6 +1358,15 @@ static void __init pmu_check_apic(void)
x86_pmu.apic = 0;
pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
pr_info("no hardware sampling interrupt available.\n");
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
* events (user-space has to fall back and
* sample via a hrtimer based software event):
*/
pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
static struct attribute_group x86_pmu_format_group = {
......
......@@ -593,7 +593,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
return 1;
}
static int __kprobes
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
int handled = 0;
......@@ -606,6 +606,7 @@ perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
......
......@@ -384,6 +384,9 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
mask |= X86_BR_NO_TX;
if (br_type & PERF_SAMPLE_BRANCH_COND)
mask |= X86_BR_JCC;
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
......@@ -678,6 +681,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
* NHM/WSM erratum: must include IND_JMP to capture IND_CALL
*/
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
[PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
};
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
......@@ -689,6 +693,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
[PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
| LBR_FAR,
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
[PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
};
/* core */
......
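With the COND filter wired into the LBR mapping tables above, user space can ask the kernel for a branch stack containing only conditional branches. A minimal user-space sketch (not part of this diff; error handling omitted) of opening such an event:

        #include <linux/perf_event.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static int open_cond_branch_event(void)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_HARDWARE;
                attr.config = PERF_COUNT_HW_CPU_CYCLES;
                attr.sample_period = 100000;
                /* Ask for a branch stack limited to conditional branches. */
                attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
                attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND |
                                          PERF_SAMPLE_BRANCH_USER;

                /* pid = 0 (self), cpu = -1 (any), no group leader, no flags */
                return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        }

On the tooling side this corresponds to the new 'cond' keyword for perf record's branch filter, per the "perf/tool: Add conditional branch filter 'cond' to perf record" entry in the shortlog above.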
......@@ -200,7 +200,7 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
unsigned __kprobes long oops_begin(void)
unsigned long oops_begin(void)
{
int cpu;
unsigned long flags;
......@@ -223,8 +223,9 @@ unsigned __kprobes long oops_begin(void)
return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
if (regs && kexec_should_crash(current))
crash_kexec(regs);
......@@ -247,8 +248,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
panic("Fatal exception");
do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
unsigned short ss;
......@@ -291,6 +293,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
#endif
return 0;
}
NOKPROBE_SYMBOL(__die);
/*
* This is gone through when something in the kernel has done something bad
......
......@@ -314,10 +314,6 @@ ENTRY(ret_from_kernel_thread)
CFI_ENDPROC
ENDPROC(ret_from_kernel_thread)
/*
* Interrupt exit functions should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
/*
* Return to user mode is not as complex as all this looks,
* but we want the default path for a system call return to
......@@ -372,10 +368,6 @@ need_resched:
END(resume_kernel)
#endif
CFI_ENDPROC
/*
* End of kprobes section
*/
.popsection
/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
......@@ -495,10 +487,6 @@ sysexit_audit:
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
/*
* syscall stub including irq exit should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
# system call handler stub
ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
......@@ -690,10 +678,6 @@ syscall_badsys:
jmp resume_userspace
END(syscall_badsys)
CFI_ENDPROC
/*
* End of kprobes section
*/
.popsection
.macro FIXUP_ESPFIX_STACK
/*
......@@ -784,10 +768,6 @@ common_interrupt:
ENDPROC(common_interrupt)
CFI_ENDPROC
/*
* Irq entries should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
......@@ -964,10 +944,6 @@ ENTRY(spurious_interrupt_bug)
jmp error_code
CFI_ENDPROC
END(spurious_interrupt_bug)
/*
* End of kprobes section
*/
.popsection
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
......@@ -1242,11 +1218,6 @@ return_to_handler:
jmp *%ecx
#endif
/*
* Some functions should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
RING0_EC_FRAME
......@@ -1460,7 +1431,3 @@ ENTRY(async_page_fault)
END(async_page_fault)
#endif
/*
* End of kprobes section
*/
.popsection
......@@ -284,8 +284,6 @@ ENDPROC(native_usergs_sysret64)
TRACE_IRQS_OFF
.endm
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
XCPT_FRAME 1 RDI+8
cld
......@@ -314,7 +312,6 @@ ENTRY(save_paranoid)
1: ret
CFI_ENDPROC
END(save_paranoid)
.popsection
/*
* A newly forked process directly context switches into this address.
......@@ -772,10 +769,6 @@ END(interrupt)
call \func
.endm
/*
* Interrupt entry/exit should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
/*
* The interrupt stubs push (~vector+0x80) onto the stack and
* then jump to common_interrupt.
......@@ -982,11 +975,6 @@ END(__do_double_fault)
# define __do_double_fault do_double_fault
#endif
/*
* End of kprobes section
*/
.popsection
/*
* APIC interrupts.
*/
......@@ -1321,11 +1309,6 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */
/*
* Some functions should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1
......@@ -1742,7 +1725,3 @@ ENTRY(ignore_sysret)
CFI_ENDPROC
END(ignore_sysret)
/*
* End of kprobes section
*/
.popsection
......@@ -32,7 +32,6 @@
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
......@@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
* NOTIFY_STOP returned for all other cases
*
*/
static int __kprobes hw_breakpoint_handler(struct die_args *args)
static int hw_breakpoint_handler(struct die_args *args)
{
int i, cpu, rc = NOTIFY_STOP;
struct perf_event *bp;
......@@ -511,7 +510,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_exceptions_notify(
int hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
{
if (val != DIE_DEBUG)
......
......@@ -112,7 +112,8 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
struct __arch_relative_insn {
u8 op;
......@@ -125,21 +126,23 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
}
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
void __kprobes synthesize_reljump(void *from, void *to)
void synthesize_reljump(void *from, void *to)
{
__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);
/* Insert a call instruction at address 'from', which calls address 'to'.*/
void __kprobes synthesize_relcall(void *from, void *to)
void synthesize_relcall(void *from, void *to)
{
__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
/*
* Skip the prefixes of the instruction.
*/
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
insn_attr_t attr;
......@@ -154,12 +157,13 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
#endif
return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
/*
* Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
int __kprobes can_boost(kprobe_opcode_t *opcodes)
int can_boost(kprobe_opcode_t *opcodes)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
......@@ -260,7 +264,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
}
/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
static int can_probe(unsigned long paddr)
{
unsigned long addr, __addr, offset = 0;
struct insn insn;
......@@ -299,7 +303,7 @@ static int __kprobes can_probe(unsigned long paddr)
/*
* Returns non-zero if opcode modifies the interrupt flag.
*/
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
static int is_IF_modifier(kprobe_opcode_t *insn)
{
/* Skip prefixes */
insn = skip_prefixes(insn);
......@@ -322,7 +326,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
* If not, return null.
* Only applicable to 64-bit x86.
*/
int __kprobes __copy_instruction(u8 *dest, u8 *src)
int __copy_instruction(u8 *dest, u8 *src)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
......@@ -365,7 +369,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
return insn.length;
}
static int __kprobes arch_copy_kprobe(struct kprobe *p)
static int arch_copy_kprobe(struct kprobe *p)
{
int ret;
......@@ -392,7 +396,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
int arch_prepare_kprobe(struct kprobe *p)
{
if (alternatives_text_reserved(p->addr, p->addr))
return -EINVAL;
......@@ -407,17 +411,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return arch_copy_kprobe(p);
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
void arch_arm_kprobe(struct kprobe *p)
{
text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
void arch_disarm_kprobe(struct kprobe *p)
{
text_poke(p->addr, &p->opcode, 1);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
......@@ -425,7 +429,8 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
......@@ -433,7 +438,8 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
......@@ -441,8 +447,9 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
......@@ -451,7 +458,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static void __kprobes clear_btf(void)
static nokprobe_inline void clear_btf(void)
{
if (test_thread_flag(TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
......@@ -461,7 +468,7 @@ static void __kprobes clear_btf(void)
}
}
static void __kprobes restore_btf(void)
static nokprobe_inline void restore_btf(void)
{
if (test_thread_flag(TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
......@@ -471,8 +478,7 @@ static void __kprobes restore_btf(void)
}
}
void __kprobes
arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long *sara = stack_addr(regs);
......@@ -481,9 +487,10 @@ arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static void __kprobes
setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
if (setup_detour_execution(p, regs, reenter))
return;
......@@ -519,22 +526,24 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
else
regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
/*
* We have reentered the kprobe_handler(), since another probe was hit while
* within the handler. We save the original kprobes variables and just single
* step on the instruction of the new probe without calling any user handlers.
*/
static int __kprobes
reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SS:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/* A probe has been hit in the codepath leading up to, or just
* after, single-stepping of a probed instruction. This entire
* codepath should strictly reside in .kprobes.text section.
......@@ -553,12 +562,13 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
*/
static int __kprobes kprobe_handler(struct pt_regs *regs)
int kprobe_int3_handler(struct pt_regs *regs)
{
kprobe_opcode_t *addr;
struct kprobe *p;
......@@ -621,12 +631,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
preempt_enable_no_resched();
return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
/*
* When a retprobed function returns, this code saves registers and
* calls trampoline_handler() runs, which calls the kretprobe's handler.
*/
static void __used __kprobes kretprobe_trampoline_holder(void)
static void __used kretprobe_trampoline_holder(void)
{
asm volatile (
".global kretprobe_trampoline\n"
......@@ -657,11 +668,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
#endif
" ret\n");
}
NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
NOKPROBE_SYMBOL(kretprobe_trampoline);
/*
* Called from kretprobe_trampoline
*/
__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
......@@ -747,6 +760,7 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
}
return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);
/*
* Called after single-stepping. p->addr is the address of the
......@@ -775,8 +789,8 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
* jump instruction after the copied instruction, that jumps to the next
* instruction after the probepoint.
*/
static void __kprobes
resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
unsigned long *tos = stack_addr(regs);
unsigned long copy_ip = (unsigned long)p->ainsn.insn;
......@@ -851,12 +865,13 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
no_change:
restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
/*
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled throughout this function.
*/
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
int kprobe_debug_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......@@ -891,8 +906,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......@@ -949,12 +965,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes
kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct die_args *args = data;
int ret = NOTIFY_DONE;
......@@ -962,22 +979,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
if (args->regs && user_mode_vm(args->regs))
return ret;
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_DEBUG:
if (post_kprobe_handler(args->regs)) {
/*
* Reset the BS bit in dr6 (pointed by args->err) to
* denote completion of processing
*/
(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
ret = NOTIFY_STOP;
}
break;
case DIE_GPF:
if (val == DIE_GPF) {
/*
* To be potentially processing a kprobe fault and to
* trust the result from kprobe_running(), we have
......@@ -986,14 +988,12 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
if (!preemptible() && kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
......@@ -1017,8 +1017,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->ip = (unsigned long)(jp->entry);
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
void __kprobes jprobe_return(void)
void jprobe_return(void)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......@@ -1034,8 +1035,10 @@ void __kprobes jprobe_return(void)
" nop \n"::"b"
(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->ip - 1);
......@@ -1063,13 +1066,22 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
}
return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
addr < (unsigned long)__entry_text_end);
}
int __init arch_init_kprobes(void)
{
return 0;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
......@@ -25,8 +25,9 @@
#include "common.h"
static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
/*
* Emulate singlestep (and also recover regs->ip)
......@@ -41,18 +42,19 @@ static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
return 1;
}
int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
if (kprobe_ftrace(p))
return __skip_singlestep(p, regs, kcb);
else
return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);
/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
......@@ -84,8 +86,9 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
end:
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
p->ainsn.boostable = -1;
......
......@@ -77,7 +77,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
}
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
*addr++ = 0x48;
......@@ -138,7 +138,8 @@ asm (
#define INT3_SIZE sizeof(kprobe_opcode_t)
/* Optimized kprobe call back function: called from optinsn */
static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long flags;
......@@ -168,8 +169,9 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
}
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);
static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
static int copy_optimized_instructions(u8 *dest, u8 *src)
{
int len = 0, ret;
......@@ -189,7 +191,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
}
/* Check whether insn is indirect jump */
static int __kprobes insn_is_indirect_jump(struct insn *insn)
static int insn_is_indirect_jump(struct insn *insn)
{
return ((insn->opcode.bytes[0] == 0xff &&
(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
......@@ -224,7 +226,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
}
/* Decode whole function to ensure any instructions don't jump into target */
static int __kprobes can_optimize(unsigned long paddr)
static int can_optimize(unsigned long paddr)
{
unsigned long addr, size = 0, offset = 0;
struct insn insn;
......@@ -275,7 +277,7 @@ static int __kprobes can_optimize(unsigned long paddr)
}
/* Check optimized_kprobe can actually be optimized. */
int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
int i;
struct kprobe *p;
......@@ -290,15 +292,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
}
/* Check the addr is within the optimized instructions. */
int __kprobes
arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr)
{
return ((unsigned long)op->kp.addr <= addr &&
(unsigned long)op->kp.addr + op->optinsn.size > addr);
}
/* Free optimized instruction slot */
static __kprobes
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
if (op->optinsn.insn) {
......@@ -308,7 +310,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
}
}
void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
__arch_remove_optimized_kprobe(op, 1);
}
......@@ -318,7 +320,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
* Target instructions MUST be relocatable (checked inside)
* This is called when new aggr(opt)probe is allocated or reused.
*/
int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
u8 *buf;
int ret;
......@@ -372,7 +374,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
* Replace breakpoints (int3) with relative jumps.
* Caller must call with locking kprobe_mutex and text_mutex.
*/
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
void arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op, *tmp;
u8 insn_buf[RELATIVEJUMP_SIZE];
......@@ -398,7 +400,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
}
/* Replace a relative jump with a breakpoint (int3). */
void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
u8 insn_buf[RELATIVEJUMP_SIZE];
......@@ -424,8 +426,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
}
}
int __kprobes
setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
struct optimized_kprobe *op;
......@@ -441,3 +442,4 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
}
return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
......@@ -251,8 +251,9 @@ u32 kvm_read_and_reset_pf_reason(void)
return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
dotraplinkage void __kprobes
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
enum ctx_state prev_state;
......@@ -276,6 +277,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
break;
}
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
......
......@@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w)
a->handler, whole_msecs, decimal_msecs);
}
static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
struct nmi_desc *desc = nmi_to_desc(type);
struct nmiaction *a;
......@@ -146,6 +146,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
/* return total number of NMI events handled */
return handled;
}
NOKPROBE_SYMBOL(nmi_handle);
int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
......@@ -208,7 +209,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
static __kprobes void
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
/* check to see if anyone registered against these types of errors */
......@@ -238,8 +239,9 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);
static __kprobes void
static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
unsigned long i;
......@@ -269,8 +271,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
reason &= ~NMI_REASON_CLEAR_IOCHK;
outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);
static __kprobes void
static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
int handled;
......@@ -298,11 +301,12 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
static __kprobes void default_do_nmi(struct pt_regs *regs)
static void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int handled;
......@@ -401,6 +405,7 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
else
unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);
/*
* NMIs can hit breakpoints which will cause it to lose its
......@@ -520,7 +525,7 @@ static inline void nmi_nesting_postprocess(void)
}
#endif
dotraplinkage notrace __kprobes void
dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
nmi_nesting_preprocess(regs);
......@@ -537,6 +542,7 @@ do_nmi(struct pt_regs *regs, long error_code)
/* On i386, may loop back to preprocess */
nmi_nesting_postprocess();
}
NOKPROBE_SYMBOL(do_nmi);
void stop_nmi(void)
{
......
......@@ -23,6 +23,7 @@
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
......@@ -389,6 +390,11 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
.end_context_switch = paravirt_nop,
};
/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
.startup_ipi_hook = paravirt_nop,
......
......@@ -413,12 +413,11 @@ void set_personality_ia32(bool x32)
set_thread_flag(TIF_ADDR32);
/* Mark the associated mm as containing 32-bit tasks. */
if (current->mm)
current->mm->context.ia32_compat = 1;
if (x32) {
clear_thread_flag(TIF_IA32);
set_thread_flag(TIF_X32);
if (current->mm)
current->mm->context.ia32_compat = TIF_X32;
current->personality &= ~READ_IMPLIES_EXEC;
/* is_compat_task() uses the presence of the x32
syscall bit flag to determine compat status */
......@@ -426,6 +425,8 @@ void set_personality_ia32(bool x32)
} else {
set_thread_flag(TIF_IA32);
clear_thread_flag(TIF_X32);
if (current->mm)
current->mm->context.ia32_compat = TIF_IA32;
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
current_thread_info()->status |= TS_COMPAT;
......
......@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
......@@ -106,7 +107,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
preempt_count_dec();
}
static int __kprobes
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
struct pt_regs *regs, long error_code)
{
......@@ -136,7 +137,38 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
return -1;
}
static void __kprobes
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
siginfo_t *info)
{
unsigned long siaddr;
int sicode;
switch (trapnr) {
default:
return SEND_SIG_PRIV;
case X86_TRAP_DE:
sicode = FPE_INTDIV;
siaddr = uprobe_get_trap_addr(regs);
break;
case X86_TRAP_UD:
sicode = ILL_ILLOPN;
siaddr = uprobe_get_trap_addr(regs);
break;
case X86_TRAP_AC:
sicode = BUS_ADRALN;
siaddr = 0;
break;
}
info->si_signo = signr;
info->si_errno = 0;
info->si_code = sicode;
info->si_addr = (void __user *)siaddr;
return info;
}
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
{
......@@ -168,60 +200,43 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
}
#endif
if (info)
force_sig_info(signr, info, tsk);
else
force_sig(signr, tsk);
force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);
#define DO_ERROR(trapnr, signr, str, name) \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
enum ctx_state prev_state; \
\
prev_state = exception_enter(); \
if (notify_die(DIE_TRAP, str, regs, error_code, \
trapnr, signr) == NOTIFY_STOP) { \
exception_exit(prev_state); \
return; \
} \
conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, NULL); \
exception_exit(prev_state); \
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
unsigned long trapnr, int signr)
{
enum ctx_state prev_state = exception_enter();
siginfo_t info;
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
conditional_sti(regs);
do_trap(trapnr, signr, str, regs, error_code,
fill_trap_info(regs, signr, trapnr, &info));
}
exception_exit(prev_state);
}
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
#define DO_ERROR(trapnr, signr, str, name) \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
siginfo_t info; \
enum ctx_state prev_state; \
\
info.si_signo = signr; \
info.si_errno = 0; \
info.si_code = sicode; \
info.si_addr = (void __user *)siaddr; \
prev_state = exception_enter(); \
if (notify_die(DIE_TRAP, str, regs, error_code, \
trapnr, signr) == NOTIFY_STOP) { \
exception_exit(prev_state); \
return; \
} \
conditional_sti(regs); \
do_trap(trapnr, signr, str, regs, error_code, &info); \
exception_exit(prev_state); \
do_error_trap(regs, error_code, str, trapnr, signr); \
}
DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip )
DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow )
DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds )
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip )
DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun )
DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS )
DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present )
DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment )
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 )
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
......@@ -263,7 +278,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
}
#endif
dotraplinkage void __kprobes
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk;
......@@ -305,13 +320,14 @@ do_general_protection(struct pt_regs *regs, long error_code)
pr_cont("\n");
}
force_sig(SIGSEGV, tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);
/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
......@@ -327,13 +343,18 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
if (poke_int3_handler(regs))
return;
prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (kprobe_int3_handler(regs))
return;
#endif
prev_state = exception_enter();
if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
goto exit;
......@@ -350,6 +371,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
......@@ -357,7 +379,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
* for scheduling or signal handling. The actual stack switch is done in
* entry.S
*/
asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
......@@ -376,6 +398,7 @@ asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
*regs = *eregs;
return regs;
}
NOKPROBE_SYMBOL(sync_regs);
#endif
/*
......@@ -402,7 +425,7 @@ asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
*
* May run on IST stack.
*/
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk = current;
enum ctx_state prev_state;
......@@ -410,8 +433,6 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
unsigned long dr6;
int si_code;
prev_state = exception_enter();
get_debugreg(dr6, 6);
/* Filter out all the reserved bits which are preset to 1 */
......@@ -440,6 +461,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
#ifdef CONFIG_KPROBES
if (kprobe_debug_handler(regs))
goto exit;
#endif
prev_state = exception_enter();
if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
SIGTRAP) == NOTIFY_STOP)
goto exit;
......@@ -482,13 +509,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
exit:
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_debug);
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
void math_error(struct pt_regs *regs, int error_code, int trapnr)
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
struct task_struct *task = current;
siginfo_t info;
......@@ -518,7 +546,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *)regs->ip;
info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
if (trapnr == X86_TRAP_MF) {
unsigned short cwd, swd;
/*
......@@ -645,7 +673,7 @@ void math_state_restore(void)
*/
if (unlikely(restore_fpu_checking(tsk))) {
drop_init_fpu(tsk);
force_sig(SIGSEGV, tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
return;
}
......@@ -653,7 +681,7 @@ void math_state_restore(void)
}
EXPORT_SYMBOL_GPL(math_state_restore);
dotraplinkage void __kprobes
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
enum ctx_state prev_state;
......@@ -679,6 +707,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
#endif
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
......
This diff has been collapsed.
......@@ -4,8 +4,8 @@
* (inspired by Andi Kleen's thunk_64.S)
* Subject to the GNU public license, v.2. No warranty of any kind.
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#ifdef CONFIG_TRACE_IRQFLAGS
/* put return address in eax (arg1) */
......@@ -22,6 +22,7 @@
popl %ecx
popl %eax
ret
_ASM_NOKPROBE(\name)
.endm
thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
......
......@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
......@@ -25,6 +26,7 @@
call \func
jmp restore
CFI_ENDPROC
_ASM_NOKPROBE(\name)
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
......@@ -43,3 +45,4 @@ restore:
RESTORE_ARGS
ret
CFI_ENDPROC
_ASM_NOKPROBE(restore)
......@@ -8,7 +8,7 @@
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/module.h> /* search_exception_table */
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
......@@ -46,7 +46,7 @@ enum x86_pf_error_code {
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
static inline int __kprobes
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
if (unlikely(is_kmmio_active()))
......@@ -55,7 +55,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
return 0;
}
static inline int __kprobes kprobes_fault(struct pt_regs *regs)
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
int ret = 0;
......@@ -262,7 +262,7 @@ void vmalloc_sync_all(void)
*
* Handle a fault on the vmalloc or module mapping area
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
static noinline int vmalloc_fault(unsigned long address)
{
unsigned long pgd_paddr;
pmd_t *pmd_k;
......@@ -292,6 +292,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
/*
* Did it hit the DOS screen memory VA from vm86 mode?
......@@ -359,7 +360,7 @@ void vmalloc_sync_all(void)
*
* This assumes no large pages in there.
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
static noinline int vmalloc_fault(unsigned long address)
{
pgd_t *pgd, *pgd_ref;
pud_t *pud, *pud_ref;
......@@ -426,6 +427,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
......@@ -928,7 +930,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
*/
static noinline __kprobes int
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
......@@ -976,6 +978,7 @@ spurious_fault(unsigned long error_code, unsigned long address)
return ret;
}
NOKPROBE_SYMBOL(spurious_fault);
int show_unhandled_signals = 1;
......@@ -1031,7 +1034,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
* {,trace_}do_page_fault() have notrace on. Having this an actual function
* guarantees there's a function trace entry.
*/
static void __kprobes noinline
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
......@@ -1254,8 +1257,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
up_read(&mm->mmap_sem);
}
NOKPROBE_SYMBOL(__do_page_fault);
dotraplinkage void __kprobes notrace
dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
unsigned long address = read_cr2(); /* Get the faulting address */
......@@ -1273,10 +1277,12 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
__do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);
#ifdef CONFIG_TRACING
static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
if (user_mode(regs))
trace_page_fault_user(address, regs, error_code);
......@@ -1284,7 +1290,7 @@ static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs
trace_page_fault_kernel(address, regs, error_code);
}
dotraplinkage void __kprobes notrace
dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
/*
......@@ -1301,4 +1307,5 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
__do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */
......@@ -1046,13 +1046,13 @@ EXPORT_SYMBOL_GPL(get_task_comm);
* so that a new one can be started
*/
void set_task_comm(struct task_struct *tsk, const char *buf)
void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
task_lock(tsk);
trace_task_rename(tsk, buf);
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
perf_event_comm(tsk);
perf_event_comm(tsk, exec);
}
int flush_old_exec(struct linux_binprm * bprm)
......@@ -1110,7 +1110,8 @@ void setup_new_exec(struct linux_binprm * bprm)
else
set_dumpable(current->mm, suid_dumpable);
set_task_comm(current, kbasename(bprm->filename));
perf_event_exec();
__set_task_comm(current, kbasename(bprm->filename), true);
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
......
......@@ -109,6 +109,15 @@
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
*(_kprobe_blacklist) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
......@@ -478,6 +487,7 @@
*(.init.rodata) \
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
......
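The KPROBE_BLACKLIST() macro above gathers every address emitted by NOKPROBE_SYMBOL() into one _kprobe_blacklist array bounded by __start_kprobe_blacklist/__stop_kprobe_blacklist. At boot, the kprobes core walks that array and turns each address into a struct kprobe_blacklist_entry (defined in include/linux/kprobes.h further down). The kernel/kprobes.c hunk is not shown in this excerpt; a rough sketch of that consumer, using kallsyms to find each function's size, would look like:

        /* Sketch only -- the real kernel/kprobes.c change is not visible in
         * this view, so names and details here are approximate. */
        extern unsigned long __start_kprobe_blacklist[];
        extern unsigned long __stop_kprobe_blacklist[];

        static int __init populate_kprobe_blacklist(unsigned long *start,
                                                    unsigned long *end)
        {
                unsigned long *iter, entry, size = 0, offset = 0;
                struct kprobe_blacklist_entry *ent;

                for (iter = start; iter < end; iter++) {
                        entry = *iter;
                        if (!kallsyms_lookup_size_offset(entry, &size, &offset))
                                continue;       /* unknown symbol, skip it */

                        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                        if (!ent)
                                return -ENOMEM;
                        ent->start_addr = entry;
                        ent->end_addr = entry + size;
                        INIT_LIST_HEAD(&ent->list);
                        list_add_tail(&ent->list, &kprobe_blacklist);
                }
                return 0;
        }

register_kprobe() can then compare a requested address against the [start_addr, end_addr) ranges on that list and refuse to arm the probe.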
......@@ -383,7 +383,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif
#endif /* __LINUX_COMPILER_H */
......@@ -205,10 +205,10 @@ struct kretprobe_blackpoint {
void *addr;
};
struct kprobe_blackpoint {
const char *name;
struct kprobe_blacklist_entry {
struct list_head list;
unsigned long start_addr;
unsigned long range;
unsigned long end_addr;
};
#ifdef CONFIG_KPROBES
......@@ -265,6 +265,7 @@ extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
struct kprobe_insn_cache {
struct mutex mutex;
......@@ -476,4 +477,18 @@ static inline int enable_jprobe(struct jprobe *jp)
return enable_kprobe(&jp->kp);
}
#ifdef CONFIG_KPROBES
/*
* Blacklist generating macro. Specify functions which are not probed
* by using this macro.
*/
#define __NOKPROBE_SYMBOL(fname) \
static unsigned long __used \
__attribute__((section("_kprobe_blacklist"))) \
_kbl_addr_##fname = (unsigned long)fname;
#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
#else
#define NOKPROBE_SYMBOL(fname)
#endif
#endif /* _LINUX_KPROBES_H */
......@@ -166,6 +166,11 @@ struct perf_event;
*/
#define PERF_EVENT_TXN 0x1
/**
* pmu::capabilities flags
*/
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
/**
* struct pmu - generic performance monitoring unit
*/
......@@ -178,6 +183,11 @@ struct pmu {
const char *name;
int type;
/*
* various common per-pmu feature flags
*/
int capabilities;
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
int task_ctx_nr;
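With this per-PMU capabilities word in place, the check that used to live in the x86 code (the block removed from x86_setup_perfctr() above) can move into the generic core: architectures set PERF_PMU_CAP_NO_INTERRUPT when no PMU interrupt is available (pmu_check_apic() on x86, cpu_pmu_init() on ARM), and the core rejects sampling events on such a PMU. The kernel/events/core.c hunk is not shown in this excerpt; the idea, as a sketch with a hypothetical helper name, is:

        /* Sketch only -- the generic check sits in perf_event_alloc() in
         * kernel/events/core.c; this helper name is made up for illustration. */
        static int reject_sampling_without_interrupt(struct perf_event *event,
                                                     struct pmu *pmu)
        {
                if (is_sampling_event(event) &&
                    (pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
                        return -EOPNOTSUPP; /* fall back to hrtimer software events */
                return 0;
        }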
......@@ -696,7 +706,8 @@ extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
......@@ -773,7 +784,7 @@ extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
#else /* !CONFIG_PERF_EVENTS: */
static inline void
perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task) { }
......@@ -803,7 +814,8 @@ static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
......
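The new capabilities field carries per-PMU feature flags; PERF_PMU_CAP_NO_INTERRUPT declares that a PMU has no overflow interrupt, and the perf_event_open() hunk further below then refuses sampling events on such a PMU with -ENOTSUPP. A hedged sketch of the driver side, where everything except the capabilities field, the flag and perf_pmu_register() is illustrative:

#include <linux/init.h>
#include <linux/perf_event.h>

static int  my_pmu_event_init(struct perf_event *event) { return 0; }
static int  my_pmu_add(struct perf_event *event, int flags) { return 0; }
static void my_pmu_del(struct perf_event *event, int flags) { }
static void my_pmu_read(struct perf_event *event) { }

static struct pmu my_counting_pmu = {
	/* counting only: no overflow interrupt is wired up */
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.read		= my_pmu_read,
};

static int __init my_pmu_module_init(void)
{
	/* sampling users of this PMU now get -ENOTSUPP at perf_event_open() */
	return perf_pmu_register(&my_counting_pmu, "my_counting_pmu", -1);
}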
......@@ -2421,7 +2421,11 @@ extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, i
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern void set_task_comm(struct task_struct *tsk, const char *from);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
......
......@@ -103,6 +103,7 @@ extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, u
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
......@@ -133,6 +134,9 @@ extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
static inline int
uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
......
......@@ -163,8 +163,9 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */
PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */
PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */
PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */
PERF_SAMPLE_BRANCH_MAX = 1U << 10, /* non-ABI */
PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */
};
#define PERF_SAMPLE_BRANCH_PLM_ALL \
......@@ -301,8 +302,8 @@ struct perf_event_attr {
exclude_callchain_kernel : 1, /* exclude kernel callchains */
exclude_callchain_user : 1, /* exclude user callchains */
mmap2 : 1, /* include mmap with inode data */
__reserved_1 : 40;
comm_exec : 1, /* flag comm events that are due to an exec */
__reserved_1 : 39;
union {
__u32 wakeup_events; /* wakeup every n events */
......@@ -501,7 +502,12 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
* PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
* different events so can reuse the same bit position.
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
/*
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
......
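Because PERF_RECORD_MISC_COMM_EXEC reuses bit 13 of the misc field, it is only meaningful on PERF_RECORD_COMM records (on mmap records the same bit means MMAP_DATA), and the new comm_exec attribute bit lets user space detect that the running kernel is able to set it. A hedged user-space sketch of how a ring-buffer consumer might test the flag:

#include <linux/perf_event.h>
#include <stdbool.h>

/* True only for comm records generated by exec(), never for mmap records. */
static bool comm_is_exec(const struct perf_event_header *hdr)
{
	return hdr->type == PERF_RECORD_COMM &&
	       (hdr->misc & PERF_RECORD_MISC_COMM_EXEC);
}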
......@@ -2974,6 +2974,22 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
local_irq_restore(flags);
}
void perf_event_exec(void)
{
struct perf_event_context *ctx;
int ctxn;
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = current->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctx);
}
rcu_read_unlock();
}
/*
* Cross CPU call to read the hardware event
*/
......@@ -5075,21 +5091,9 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
NULL);
}
void perf_event_comm(struct task_struct *task)
void perf_event_comm(struct task_struct *task, bool exec)
{
struct perf_comm_event comm_event;
struct perf_event_context *ctx;
int ctxn;
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctx);
}
rcu_read_unlock();
if (!atomic_read(&nr_comm_events))
return;
......@@ -5101,7 +5105,7 @@ void perf_event_comm(struct task_struct *task)
.event_id = {
.header = {
.type = PERF_RECORD_COMM,
.misc = 0,
.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
/* .size */
},
/* .pid */
......@@ -7122,6 +7126,13 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -ENOTSUPP;
goto err_alloc;
}
}
account_event(event);
/*
......@@ -7433,7 +7444,7 @@ __perf_event_exit_task(struct perf_event *child_event,
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event;
struct perf_event *child_event, *next;
struct perf_event_context *child_ctx;
unsigned long flags;
......@@ -7487,7 +7498,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
*/
mutex_lock(&child_ctx->mutex);
list_for_each_entry_rcu(child_event, &child_ctx->event_list, event_entry)
list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
__perf_event_exit_task(child_event, child_ctx, child);
mutex_unlock(&child_ctx->mutex);
......
......@@ -36,6 +36,7 @@
#include "../../mm/internal.h" /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/uprobes.h>
......@@ -127,7 +128,7 @@ struct xol_area {
*/
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
if (is_register)
flags |= VM_WRITE;
......@@ -279,18 +280,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t
* supported by that architecture then we need to modify is_trap_at_addr and
* uprobe_write_opcode accordingly. This would never be a problem for archs
* that have fixed length instructions.
*/
/*
*
* uprobe_write_opcode - write the opcode at a given virtual address.
* @mm: the probed process address space.
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
*
* Called with mm->mmap_sem held (for read and with a reference to
* mm).
*
* For mm @mm, write the opcode at @vaddr.
* Called with mm->mmap_sem held for write.
* Return 0 (success) or a negative errno.
*/
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
......@@ -310,21 +306,25 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
if (ret <= 0)
goto put_old;
ret = anon_vma_prepare(vma);
if (ret)
goto put_old;
ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
goto put_old;
__SetPageUptodate(new_page);
if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))
goto put_new;
__SetPageUptodate(new_page);
copy_highpage(new_page, old_page);
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
ret = anon_vma_prepare(vma);
if (ret)
goto put_new;
ret = __replace_page(vma, vaddr, old_page, new_page);
if (ret)
mem_cgroup_uncharge_page(new_page);
put_new:
page_cache_release(new_page);
......@@ -537,14 +537,15 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
void *insn, int nbytes, loff_t offset)
{
struct page *page;
if (!mapping->a_ops->readpage)
return -EIO;
/*
* Ensure that the page that has the original instruction is
* populated and in page-cache.
* Ensure that the page that has the original instruction is populated
* and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
* see uprobe_register().
*/
page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
if (mapping->a_ops->readpage)
page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
......@@ -880,6 +881,9 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
if (!uc->handler && !uc->ret_handler)
return -EINVAL;
/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
return -EIO;
/* Racy, just to catch the obvious mistakes */
if (offset > i_size_read(inode))
return -EINVAL;
......@@ -1361,6 +1365,16 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
if (unlikely(utask && utask->active_uprobe))
return utask->vaddr;
return instruction_pointer(regs);
}
/*
* Called with no locks held.
* Called in context of an exiting or an exec-ing thread.
......
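uprobe_get_trap_addr() returns the address of the probed instruction while the current task is single-stepping its out-of-line copy, and falls back to the plain instruction pointer otherwise (and always, when uprobes are configured out). A hedged sketch of the intended use; the wrapper below is purely illustrative:

#include <linux/ptrace.h>
#include <linux/uprobes.h>

/* Illustrative: pick the fault address that user space should see. */
static unsigned long fault_address_for_signal(struct pt_regs *regs)
{
	/*
	 * If the trapping instruction is the XOL copy of a probed
	 * instruction, report the original probed address instead of
	 * the address of the copy.
	 */
	return uprobe_get_trap_addr(regs);
}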
This diff is collapsed.
......@@ -71,9 +71,9 @@ static int notifier_chain_unregister(struct notifier_block **nl,
* @returns: notifier_call_chain returns the value returned by the
* last notifier function called.
*/
static int __kprobes notifier_call_chain(struct notifier_block **nl,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
static int notifier_call_chain(struct notifier_block **nl,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
{
int ret = NOTIFY_DONE;
struct notifier_block *nb, *next_nb;
......@@ -102,6 +102,7 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
}
return ret;
}
NOKPROBE_SYMBOL(notifier_call_chain);
/*
* Atomic notifier chain routines. Registration and unregistration
......@@ -172,9 +173,9 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
* Otherwise the return value is the return value
* of the last notifier function called.
*/
int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
{
int ret;
......@@ -184,13 +185,15 @@ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v)
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v)
{
return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain);
/*
* Blocking notifier chain routines. All access to the chain is
......@@ -527,7 +530,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
static ATOMIC_NOTIFIER_HEAD(die_chain);
int notrace __kprobes notify_die(enum die_val val, const char *str,
int notrace notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
......@@ -540,6 +543,7 @@ int notrace __kprobes notify_die(enum die_val val, const char *str,
};
return atomic_notifier_call_chain(&die_chain, val, &args);
}
NOKPROBE_SYMBOL(notify_die);
int register_die_notifier(struct notifier_block *nb)
{
......
......@@ -2527,7 +2527,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
void __kprobes preempt_count_add(int val)
void preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
......@@ -2553,8 +2553,9 @@ void __kprobes preempt_count_add(int val)
}
}
EXPORT_SYMBOL(preempt_count_add);
NOKPROBE_SYMBOL(preempt_count_add);
void __kprobes preempt_count_sub(int val)
void preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
......@@ -2575,6 +2576,7 @@ void __kprobes preempt_count_sub(int val)
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);
#endif
......@@ -2857,6 +2859,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
barrier();
} while (need_resched());
}
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
#endif /* CONFIG_PREEMPT */
......
......@@ -248,8 +248,8 @@ void perf_trace_del(struct perf_event *p_event, int flags)
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp)
void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp)
{
struct trace_entry *entry;
unsigned long flags;
......@@ -281,6 +281,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);
#ifdef CONFIG_FUNCTION_TRACER
static void
......
......@@ -40,27 +40,27 @@ struct trace_kprobe {
(sizeof(struct probe_arg) * (n)))
static __kprobes bool trace_kprobe_is_return(struct trace_kprobe *tk)
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
return tk->rp.handler != NULL;
}
static __kprobes const char *trace_kprobe_symbol(struct trace_kprobe *tk)
static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
return tk->symbol ? tk->symbol : "unknown";
}
static __kprobes unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
return tk->rp.kp.offset;
}
static __kprobes bool trace_kprobe_has_gone(struct trace_kprobe *tk)
static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
return !!(kprobe_gone(&tk->rp.kp));
}
static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
struct module *mod)
{
int len = strlen(mod->name);
......@@ -68,7 +68,7 @@ static __kprobes bool trace_kprobe_within_module(struct trace_kprobe *tk,
return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}
static __kprobes bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
return !!strchr(trace_kprobe_symbol(tk), ':');
}
......@@ -132,19 +132,21 @@ struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
* Kprobes-specific fetch functions
*/
#define DEFINE_FETCH_stack(type) \
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
void *offset, void *dest) \
{ \
*(type *)dest = (type)regs_get_kernel_stack_nth(regs, \
(unsigned int)((unsigned long)offset)); \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string NULL
#define fetch_stack_string_size NULL
#define DEFINE_FETCH_memory(type) \
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
void *addr, void *dest) \
{ \
type retval; \
......@@ -152,14 +154,16 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
*(type *)dest = 0; \
else \
*(type *)dest = retval; \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
* Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
* length and relative data location.
*/
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
void *addr, void *dest)
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
void *addr, void *dest)
{
long ret;
int maxlen = get_rloc_len(*(u32 *)dest);
......@@ -193,10 +197,11 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
get_rloc_offs(*(u32 *)dest));
}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
/* Return the length of string -- including null terminal byte */
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
void *addr, void *dest)
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
void *addr, void *dest)
{
mm_segment_t old_fs;
int ret, len = 0;
......@@ -219,17 +224,19 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
else
*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
#define DEFINE_FETCH_symbol(type) \
__kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, \
void *data, void *dest) \
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{ \
struct symbol_cache *sc = data; \
if (sc->addr) \
fetch_memory_##type(regs, (void *)sc->addr, dest); \
else \
*(type *)dest = 0; \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)
......@@ -907,7 +914,7 @@ static const struct file_operations kprobe_profile_ops = {
};
/* Kprobe handler */
static __kprobes void
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
struct ftrace_event_file *ftrace_file)
{
......@@ -943,7 +950,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
entry, irq_flags, pc, regs);
}
static __kprobes void
static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
struct event_file_link *link;
......@@ -951,9 +958,10 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
list_for_each_entry_rcu(link, &tk->tp.files, list)
__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);
/* Kretprobe handler */
static __kprobes void
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs,
struct ftrace_event_file *ftrace_file)
......@@ -991,7 +999,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry, irq_flags, pc, regs);
}
static __kprobes void
static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
......@@ -1000,6 +1008,7 @@ kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
list_for_each_entry_rcu(link, &tk->tp.files, list)
__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);
/* Event entry printers */
static enum print_line_t
......@@ -1131,7 +1140,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
#ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
static __kprobes void
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
struct ftrace_event_call *call = &tk->tp.call;
......@@ -1158,9 +1167,10 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);
/* Kretprobe profile handler */
static __kprobes void
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct pt_regs *regs)
{
......@@ -1188,6 +1198,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif /* CONFIG_PERF_EVENTS */
/*
......@@ -1196,9 +1207,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
* kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
* lockless, but we can't race with this __init function.
*/
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
enum trace_reg type, void *data)
static int kprobe_register(struct ftrace_event_call *event,
enum trace_reg type, void *data)
{
struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
struct ftrace_event_file *file = data;
......@@ -1224,8 +1234,7 @@ int kprobe_register(struct ftrace_event_call *event,
return 0;
}
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
......@@ -1239,9 +1248,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
#endif
return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
......@@ -1255,6 +1265,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
#endif
return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);
static struct trace_event_functions kretprobe_funcs = {
.trace = print_kretprobe_event
......
......@@ -37,13 +37,13 @@ const char *reserved_field_names[] = {
/* Printing in basic type function template */
#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt) \
__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
const char *name, \
void *data, void *ent) \
int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
void *data, void *ent) \
{ \
return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \
} \
const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \
NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type));
DEFINE_BASIC_PRINT_TYPE_FUNC(u8 , "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "0x%x")
......@@ -55,9 +55,8 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%d")
DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%Ld")
/* Print type function for string type */
__kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
const char *name,
void *data, void *ent)
int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name,
void *data, void *ent)
{
int len = *(u32 *)data >> 16;
......@@ -67,6 +66,7 @@ __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
return trace_seq_printf(s, " %s=\"%s\"", name,
(const char *)get_loc_data(data, ent));
}
NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string));
const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
......@@ -81,23 +81,24 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
/* Data fetch function templates */
#define DEFINE_FETCH_reg(type) \
__kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \
void *offset, void *dest) \
void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \
{ \
*(type *)dest = (type)regs_get_register(regs, \
(unsigned int)((unsigned long)offset)); \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type));
DEFINE_BASIC_FETCH_FUNCS(reg)
/* No string on the register */
#define fetch_reg_string NULL
#define fetch_reg_string_size NULL
#define DEFINE_FETCH_retval(type) \
__kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \
void *dummy, void *dest) \
void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \
void *dummy, void *dest) \
{ \
*(type *)dest = (type)regs_return_value(regs); \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type));
DEFINE_BASIC_FETCH_FUNCS(retval)
/* No string on the retval */
#define fetch_retval_string NULL
......@@ -112,8 +113,8 @@ struct deref_fetch_param {
};
#define DEFINE_FETCH_deref(type) \
__kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
void *data, void *dest) \
void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
void *data, void *dest) \
{ \
struct deref_fetch_param *dprm = data; \
unsigned long addr; \
......@@ -123,12 +124,13 @@ __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \
dprm->fetch(regs, (void *)addr, dest); \
} else \
*(type *)dest = 0; \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type));
DEFINE_BASIC_FETCH_FUNCS(deref)
DEFINE_FETCH_deref(string)
__kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
void *data, void *dest)
void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
void *data, void *dest)
{
struct deref_fetch_param *dprm = data;
unsigned long addr;
......@@ -140,16 +142,18 @@ __kprobes void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs,
} else
*(string_size *)dest = 0;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size));
static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
static void update_deref_fetch_param(struct deref_fetch_param *data)
{
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
update_deref_fetch_param(data->orig.data);
else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
update_symbol_cache(data->orig.data);
}
NOKPROBE_SYMBOL(update_deref_fetch_param);
static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
static void free_deref_fetch_param(struct deref_fetch_param *data)
{
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
free_deref_fetch_param(data->orig.data);
......@@ -157,6 +161,7 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
free_symbol_cache(data->orig.data);
kfree(data);
}
NOKPROBE_SYMBOL(free_deref_fetch_param);
/* Bitfield fetch function */
struct bitfield_fetch_param {
......@@ -166,8 +171,8 @@ struct bitfield_fetch_param {
};
#define DEFINE_FETCH_bitfield(type) \
__kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
void *data, void *dest) \
void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
void *data, void *dest) \
{ \
struct bitfield_fetch_param *bprm = data; \
type buf = 0; \
......@@ -177,13 +182,13 @@ __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, \
buf >>= bprm->low_shift; \
} \
*(type *)dest = buf; \
}
} \
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type));
DEFINE_BASIC_FETCH_FUNCS(bitfield)
#define fetch_bitfield_string NULL
#define fetch_bitfield_string_size NULL
static __kprobes void
static void
update_bitfield_fetch_param(struct bitfield_fetch_param *data)
{
/*
......@@ -196,7 +201,7 @@ update_bitfield_fetch_param(struct bitfield_fetch_param *data)
update_symbol_cache(data->orig.data);
}
static __kprobes void
static void
free_bitfield_fetch_param(struct bitfield_fetch_param *data)
{
/*
......@@ -255,17 +260,17 @@ static const struct fetch_type *find_fetch_type(const char *type,
}
/* Special function : only accept unsigned long */
static __kprobes void fetch_kernel_stack_address(struct pt_regs *regs,
void *dummy, void *dest)
static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest)
{
*(unsigned long *)dest = kernel_stack_pointer(regs);
}
NOKPROBE_SYMBOL(fetch_kernel_stack_address);
static __kprobes void fetch_user_stack_address(struct pt_regs *regs,
void *dummy, void *dest)
static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest)
{
*(unsigned long *)dest = user_stack_pointer(regs);
}
NOKPROBE_SYMBOL(fetch_user_stack_address);
static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
fetch_func_t orig_fn,
......
......@@ -81,13 +81,13 @@
*/
#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
static inline void *get_rloc_data(u32 *dl)
static nokprobe_inline void *get_rloc_data(u32 *dl)
{
return (u8 *)dl + get_rloc_offs(*dl);
}
/* For data_loc conversion */
static inline void *get_loc_data(u32 *dl, void *ent)
static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
{
return (u8 *)ent + get_rloc_offs(*dl);
}
......@@ -136,9 +136,8 @@ typedef u32 string_size;
/* Printing in basic type function template */
#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \
__kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \
const char *name, \
void *data, void *ent); \
int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
void *data, void *ent); \
extern const char PRINT_TYPE_FMT_NAME(type)[]
DECLARE_BASIC_PRINT_TYPE_FUNC(u8);
......@@ -303,7 +302,7 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp)
return !!(tp->flags & TP_FLAG_REGISTERED);
}
static inline __kprobes void call_fetch(struct fetch_param *fprm,
static nokprobe_inline void call_fetch(struct fetch_param *fprm,
struct pt_regs *regs, void *dest)
{
return fprm->fn(regs, fprm->data, dest);
......@@ -351,7 +350,7 @@ extern ssize_t traceprobe_probes_write(struct file *file,
extern int traceprobe_command(const char *buf, int (*createfn)(int, char**));
/* Sum up total data length for dynamic arraies (strings) */
static inline __kprobes int
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
int i, ret = 0;
......@@ -367,7 +366,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
}
/* Store the value of each argument */
static inline __kprobes void
static nokprobe_inline void
store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
u8 *data, int maxlen)
{
......
......@@ -108,8 +108,8 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
* Uprobes-specific fetch functions
*/
#define DEFINE_FETCH_stack(type) \
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
void *offset, void *dest) \
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
void *offset, void *dest) \
{ \
*(type *)dest = (type)get_user_stack_nth(regs, \
((unsigned long)offset)); \
......@@ -120,8 +120,8 @@ DEFINE_BASIC_FETCH_FUNCS(stack)
#define fetch_stack_string_size NULL
#define DEFINE_FETCH_memory(type) \
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
void *addr, void *dest) \
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
void *addr, void *dest) \
{ \
type retval; \
void __user *vaddr = (void __force __user *) addr; \
......@@ -136,8 +136,8 @@ DEFINE_BASIC_FETCH_FUNCS(memory)
* Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
* length and relative data location.
*/
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
void *addr, void *dest)
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
void *addr, void *dest)
{
long ret;
u32 rloc = *(u32 *)dest;
......@@ -158,8 +158,8 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
}
}
static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
void *addr, void *dest)
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
void *addr, void *dest)
{
int len;
void __user *vaddr = (void __force __user *) addr;
......@@ -184,8 +184,8 @@ static unsigned long translate_user_vaddr(void *file_offset)
}
#define DEFINE_FETCH_file_offset(type) \
static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\
void *offset, void *dest) \
static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
void *offset, void *dest)\
{ \
void *vaddr = (void *)translate_user_vaddr(offset); \
\
......@@ -1009,56 +1009,60 @@ uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
bool done;
write_lock(&tu->filter.rwlock);
if (event->hw.tp_target) {
/*
* event->parent != NULL means copy_process(), we can avoid
* uprobe_apply(). current->mm must be probed and we can rely
* on dup_mmap() which preserves the already installed bp's.
*
* attr.enable_on_exec means that exec/mmap will install the
* breakpoints we need.
*/
list_del(&event->hw.tp_list);
done = tu->filter.nr_systemwide ||
event->parent || event->attr.enable_on_exec ||
(event->hw.tp_target->flags & PF_EXITING) ||
uprobe_filter_event(tu, event);
list_add(&event->hw.tp_list, &tu->filter.perf_events);
} else {
tu->filter.nr_systemwide--;
done = tu->filter.nr_systemwide;
tu->filter.nr_systemwide++;
}
write_unlock(&tu->filter.rwlock);
if (!done)
uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
return 0;
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
bool done;
int err;
write_lock(&tu->filter.rwlock);
if (event->hw.tp_target) {
list_del(&event->hw.tp_list);
/*
* event->parent != NULL means copy_process(), we can avoid
* uprobe_apply(). current->mm must be probed and we can rely
* on dup_mmap() which preserves the already installed bp's.
*
* attr.enable_on_exec means that exec/mmap will install the
* breakpoints we need.
*/
done = tu->filter.nr_systemwide ||
(event->hw.tp_target->flags & PF_EXITING) ||
event->parent || event->attr.enable_on_exec ||
uprobe_filter_event(tu, event);
list_add(&event->hw.tp_list, &tu->filter.perf_events);
} else {
tu->filter.nr_systemwide--;
done = tu->filter.nr_systemwide;
tu->filter.nr_systemwide++;
}
write_unlock(&tu->filter.rwlock);
if (!done)
uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
return 0;
err = 0;
if (!done) {
err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
if (err)
uprobe_perf_close(tu, event);
}
return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
......
/* TODO merge/factor in debugfs.c here */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/vfs.h>
......@@ -96,12 +98,51 @@ static bool fs__check_mounts(struct fs *fs)
return false;
}
static void mem_toupper(char *f, size_t len)
{
while (len) {
*f = toupper(*f);
f++;
len--;
}
}
/*
* Check for "NAME_PATH" environment variable to override fs location (for
* testing). This matches the recommendation in Documentation/sysfs-rules.txt
* for SYSFS_PATH.
*/
static bool fs__env_override(struct fs *fs)
{
char *override_path;
size_t name_len = strlen(fs->name);
/* name + "_PATH" + '\0' */
char upper_name[name_len + 5 + 1];
memcpy(upper_name, fs->name, name_len);
mem_toupper(upper_name, name_len);
strcpy(&upper_name[name_len], "_PATH");
override_path = getenv(upper_name);
if (!override_path)
return false;
fs->found = true;
strncpy(fs->path, override_path, sizeof(fs->path));
return true;
}
static const char *fs__get_mountpoint(struct fs *fs)
{
if (fs__env_override(fs))
return fs->path;
if (fs__check_mounts(fs))
return fs->path;
return fs__read_mounts(fs) ? fs->path : NULL;
if (fs__read_mounts(fs))
return fs->path;
return NULL;
}
static const char *fs__mountpoint(int idx)
......
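fs__env_override() derives the variable name by upper-casing the filesystem name and appending "_PATH", so the "sysfs" entry honours SYSFS_PATH as recommended by Documentation/sysfs-rules.txt. A hedged test-side sketch of the convention; only the environment-variable naming comes from the patch, the rest is illustrative:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Redirect the "sysfs" entry to a fake tree before any lookup runs. */
	setenv("SYSFS_PATH", "/tmp/fake-sysfs", 1);

	/*
	 * A later mount-point lookup through this library would now return
	 * /tmp/fake-sysfs without consulting /proc/mounts.
	 */
	printf("SYSFS_PATH override installed\n");
	return 0;
}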
......@@ -184,9 +184,10 @@ following filters are defined:
- in_tx: only when the target is in a hardware transaction
- no_tx: only when the target is not in a hardware transaction
- abort_tx: only when the target is a hardware transaction abort
- cond: conditional branches
+
The option requires at least one branch type among any, any_call, any_ret, ind_call.
The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
The privilege levels may be omitted, in which case, the privilege levels of the associated
event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege
levels are subject to permissions. When sampling on multiple events, branch stack sampling
......
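On the programmatic side, the new 'cond' filter corresponds to PERF_SAMPLE_BRANCH_COND in perf_event_attr.branch_sample_type. A hedged sketch of a raw perf_event_open() caller asking for user-level conditional branches only; the event choice and period are illustrative:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns a perf fd sampling cycles with a conditional-branch stack. */
static int open_cond_branch_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND |
				  PERF_SAMPLE_BRANCH_USER;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}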
......@@ -111,7 +111,7 @@ OPTIONS
--fields=::
Specify output field - multiple keys can be specified in CSV format.
Following fields are available:
overhead, overhead_sys, overhead_us, sample and period.
overhead, overhead_sys, overhead_us, overhead_children, sample and period.
Also it can contain any sort key(s).
By default, every sort keys not specified in -F will be appended
......@@ -163,6 +163,11 @@ OPTIONS
Default: fractal,0.5,callee,function.
--children::
Accumulate callchains of children to the parent entry so that they can
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires callchains to be recorded.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
beyond the specified depth will be ignored. This is a trade-off
......
......@@ -119,7 +119,7 @@ Default is to monitor all CPUS.
--fields=::
Specify output field - multiple keys can be specified in CSV format.
Following fields are available:
overhead, overhead_sys, overhead_us, sample and period.
overhead, overhead_sys, overhead_us, overhead_children, sample and period.
Also it can contain any sort key(s).
By default, every sort keys not specified in --field will be appended
......@@ -161,6 +161,12 @@ Default is to monitor all CPUS.
Setup and enable call-graph (stack chain/backtrace) recording,
implies -g.
--children::
Accumulate callchains of children to the parent entry so that they can
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires the -g/--call-graph option
to be enabled.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
beyond the specified depth will be ignored. This is a trade-off
......
......@@ -400,6 +400,7 @@ LIB_OBJS += $(OUTPUT)tests/hists_common.o
LIB_OBJS += $(OUTPUT)tests/hists_link.o
LIB_OBJS += $(OUTPUT)tests/hists_filter.o
LIB_OBJS += $(OUTPUT)tests/hists_output.o
LIB_OBJS += $(OUTPUT)tests/hists_cumulate.o
LIB_OBJS += $(OUTPUT)tests/python-use.o
LIB_OBJS += $(OUTPUT)tests/bp_signal.o
LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
......@@ -788,8 +789,8 @@ help:
@echo ''
@echo 'Perf install targets:'
@echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed'
@echo ' HINT: use "make prefix=<path> <install target>" to install to a particular'
@echo ' path like make prefix=/usr/local install install-doc'
@echo ' HINT: use "prefix" or "DESTDIR" to install to a particular'
@echo ' path like "make prefix=/usr/local install install-doc"'
@echo ' install - install compiled binaries'
@echo ' install-doc - install *all* documentation'
@echo ' install-man - install manpage documentation'
......@@ -814,17 +815,20 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
$(DOC_TARGETS):
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol
TAG_FILES= ../../include/uapi/linux/perf_event.h
TAGS:
$(RM) TAGS
$(FIND) . -name '*.[hcS]' -print | xargs etags -a
$(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES)
tags:
$(RM) tags
$(FIND) . -name '*.[hcS]' -print | xargs ctags -a
$(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES)
cscope:
$(RM) cscope*
$(FIND) . -name '*.[hcS]' -print | xargs cscope -b
$(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES)
### Detect prefix changes
TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
......
......@@ -65,12 +65,13 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
return 0;
}
he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, 1, 1, 0);
he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, 1, 1, 0,
true);
if (he == NULL)
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
hists__inc_nr_samples(&evsel->hists, true);
return ret;
}
......
......@@ -315,7 +315,7 @@ static int hists__add_entry(struct hists *hists,
u64 weight, u64 transaction)
{
if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight,
transaction) != NULL)
transaction, true) != NULL)
return 0;
return -ENOMEM;
}
......
......@@ -454,7 +454,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (done)
break;
err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
if (err < 0 && errno == EINTR)
/*
* Propagate the error only if there is one. Ignore a positive
* number of returned events and an interrupted poll (EINTR).
*/
if (err > 0 || (err < 0 && errno == EINTR))
err = 0;
waking++;
}
......@@ -544,6 +548,7 @@ static const struct branch_mode branch_modes[] = {
BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
BRANCH_END
};
......
......@@ -72,6 +72,10 @@ static int report__config(const char *var, const char *value, void *cb)
rep->min_percent = strtof(value, NULL);
return 0;
}
if (!strcmp(var, "report.children")) {
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
return perf_default_config(var, value, cb);
}
......@@ -85,156 +89,52 @@ static void report__inc_stats(struct report *rep, struct hist_entry *he)
*/
if (he->stat.nr_events == 1)
rep->nr_entries++;
/*
* Only counts number of samples at this stage as it's more
* natural to do it here and non-sample events are also
* counted in perf_session_deliver_event(). The dump_trace
* requires this info is ready before going to the output tree.
*/
hists__inc_nr_events(he->hists, PERF_RECORD_SAMPLE);
if (!he->filtered)
he->hists->stats.nr_non_filtered_samples++;
}
static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al,
struct perf_sample *sample, struct perf_evsel *evsel)
static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
{
struct symbol *parent = NULL;
struct hist_entry *he;
struct mem_info *mi, *mx;
uint64_t cost;
int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
if (err)
return err;
int err = 0;
struct report *rep = arg;
struct hist_entry *he = iter->he;
struct perf_evsel *evsel = iter->evsel;
struct mem_info *mi;
struct branch_info *bi;
mi = sample__resolve_mem(sample, al);
if (!mi)
return -ENOMEM;
report__inc_stats(rep, he);
if (rep->hide_unresolved && !al->sym)
if (!ui__has_annotation())
return 0;
cost = sample->weight;
if (!cost)
cost = 1;
/*
* must pass period=weight in order to get the correct
* sorting from hists__collapse_resort() which is solely
* based on periods. We want sorting to be done on nr_events * weight
* and this is indirectly achieved by passing period=weight here
* and the he_stat__add_period() function.
*/
he = __hists__add_entry(&evsel->hists, al, parent, NULL, mi,
cost, cost, 0);
if (!he)
return -ENOMEM;
if (ui__has_annotation()) {
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
if (err)
goto out;
mx = he->mem_info;
err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
if (sort__mode == SORT_MODE__BRANCH) {
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, evsel->idx);
if (err)
goto out;
}
report__inc_stats(rep, he);
err = hist_entry__append_callchain(he, sample);
out:
return err;
}
static int report__add_branch_hist_entry(struct report *rep, struct addr_location *al,
struct perf_sample *sample, struct perf_evsel *evsel)
{
struct symbol *parent = NULL;
unsigned i;
struct hist_entry *he;
struct branch_info *bi, *bx;
int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
if (err)
return err;
bi = sample__resolve_bstack(sample, al);
if (!bi)
return -ENOMEM;
for (i = 0; i < sample->branch_stack->nr; i++) {
if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
continue;
err = addr_map_symbol__inc_samples(&bi->to, evsel->idx);
err = -ENOMEM;
/* overwrite the 'al' to branch-to info */
al->map = bi[i].to.map;
al->sym = bi[i].to.sym;
al->addr = bi[i].to.addr;
/*
* The report shows the percentage of total branches captured
* and not events sampled. Thus we use a pseudo period of 1.
*/
he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
1, 1, 0);
if (he) {
if (ui__has_annotation()) {
bx = he->branch_info;
err = addr_map_symbol__inc_samples(&bx->from,
evsel->idx);
if (err)
goto out;
err = addr_map_symbol__inc_samples(&bx->to,
evsel->idx);
if (err)
goto out;
}
report__inc_stats(rep, he);
} else
} else if (rep->mem_mode) {
mi = he->mem_info;
err = addr_map_symbol__inc_samples(&mi->daddr, evsel->idx);
if (err)
goto out;
}
err = 0;
out:
free(bi);
return err;
}
static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel,
struct addr_location *al, struct perf_sample *sample)
{
struct symbol *parent = NULL;
struct hist_entry *he;
int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
if (err)
return err;
he = __hists__add_entry(&evsel->hists, al, parent, NULL, NULL,
sample->period, sample->weight,
sample->transaction);
if (he == NULL)
return -ENOMEM;
err = hist_entry__append_callchain(he, sample);
if (err)
goto out;
if (ui__has_annotation())
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
report__inc_stats(rep, he);
} else if (symbol_conf.cumulate_callchain) {
if (single)
err = hist_entry__inc_addr_samples(he, evsel->idx,
al->addr);
} else {
err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
}
out:
return err;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
......@@ -243,6 +143,10 @@ static int process_sample_event(struct perf_tool *tool,
{
struct report *rep = container_of(tool, struct report, tool);
struct addr_location al;
struct hist_entry_iter iter = {
.hide_unresolved = rep->hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
int ret;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
......@@ -257,22 +161,23 @@ static int process_sample_event(struct perf_tool *tool,
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
if (sort__mode == SORT_MODE__BRANCH) {
ret = report__add_branch_hist_entry(rep, &al, sample, evsel);
if (ret < 0)
pr_debug("problem adding lbr entry, skipping event\n");
} else if (rep->mem_mode == 1) {
ret = report__add_mem_hist_entry(rep, &al, sample, evsel);
if (ret < 0)
pr_debug("problem adding mem entry, skipping event\n");
} else {
if (al.map != NULL)
al.map->dso->hit = 1;
if (sort__mode == SORT_MODE__BRANCH)
iter.ops = &hist_iter_branch;
else if (rep->mem_mode)
iter.ops = &hist_iter_mem;
else if (symbol_conf.cumulate_callchain)
iter.ops = &hist_iter_cumulative;
else
iter.ops = &hist_iter_normal;
if (al.map != NULL)
al.map->dso->hit = 1;
ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack,
rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
ret = report__add_hist_entry(rep, evsel, &al, sample);
if (ret < 0)
pr_debug("problem incrementing symbol period, skipping event\n");
}
return ret;
}
......@@ -329,6 +234,14 @@ static int report__setup_sample_type(struct report *rep)
}
}
if (symbol_conf.cumulate_callchain) {
/* Silently ignore if callchain is missing */
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
symbol_conf.cumulate_callchain = false;
perf_hpp__cancel_cumulate();
}
}
if (sort__mode == SORT_MODE__BRANCH) {
if (!is_pipe &&
!(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
......@@ -712,6 +625,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
"Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address). "
"Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
"Accumulate callchains of children and show total overhead as well"),
OPT_INTEGER(0, "max-stack", &report.max_stack,
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
......@@ -804,8 +719,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
has_br_stack = perf_header__has_feat(&session->header,
HEADER_BRANCH_STACK);
if (branch_mode == -1 && has_br_stack)
if (branch_mode == -1 && has_br_stack) {
sort__mode = SORT_MODE__BRANCH;
symbol_conf.cumulate_callchain = false;
}
if (report.mem_mode) {
if (sort__mode == SORT_MODE__BRANCH) {
......@@ -813,6 +730,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
goto error;
}
sort__mode = SORT_MODE__MEMORY;
symbol_conf.cumulate_callchain = false;
}
if (setup_sorting() < 0) {
......
......@@ -1428,7 +1428,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
int err = 0;
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
hists__inc_nr_samples(&evsel->hists, true);
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
......
This diff is collapsed.
......@@ -447,6 +447,7 @@ else
ifneq ($(feature-libperl), 1)
CFLAGS += -DNO_LIBPERL
NO_LIBPERL := 1
msg := $(warning Missing perl devel files. Disabling perl scripting support, consider installing perl-ExtUtils-Embed);
else
LDFLAGS += $(PERL_EMBED_LDFLAGS)
EXTLIBS += $(PERL_EMBED_LIBADD)
......@@ -599,7 +600,7 @@ endif
# Make the path relative to DESTDIR, not to prefix
ifndef DESTDIR
prefix = $(HOME)
prefix ?= $(HOME)
endif
bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
......
......@@ -481,14 +481,18 @@ int main(int argc, const char **argv)
fprintf(stderr, "cannot handle %s internally", cmd);
goto out;
}
#ifdef HAVE_LIBAUDIT_SUPPORT
if (!prefixcmp(cmd, "trace")) {
#ifdef HAVE_LIBAUDIT_SUPPORT
set_buildid_dir();
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv, NULL);
}
#else
fprintf(stderr,
"trace command not available: missing audit-libs devel package at build time.\n");
goto out;
#endif
}
/* Look for flags.. */
argv++;
argc--;
......
......@@ -139,6 +139,10 @@ static struct test {
.desc = "Test output sorting of hist entries",
.func = test__hists_output,
},
{
.desc = "Test cumulation of child hist entries",
.func = test__hists_cumulate,
},
{
.func = NULL,
},
......
This diff is collapsed.
......@@ -4,6 +4,34 @@
struct machine;
struct machines;
#define FAKE_PID_PERF1 100
#define FAKE_PID_PERF2 200
#define FAKE_PID_BASH 300
#define FAKE_MAP_PERF 0x400000
#define FAKE_MAP_BASH 0x400000
#define FAKE_MAP_LIBC 0x500000
#define FAKE_MAP_KERNEL 0xf00000
#define FAKE_MAP_LENGTH 0x100000
#define FAKE_SYM_OFFSET1 700
#define FAKE_SYM_OFFSET2 800
#define FAKE_SYM_OFFSET3 900
#define FAKE_SYM_LENGTH 100
#define FAKE_IP_PERF_MAIN FAKE_MAP_PERF + FAKE_SYM_OFFSET1
#define FAKE_IP_PERF_RUN_COMMAND FAKE_MAP_PERF + FAKE_SYM_OFFSET2
#define FAKE_IP_PERF_CMD_RECORD FAKE_MAP_PERF + FAKE_SYM_OFFSET3
#define FAKE_IP_BASH_MAIN FAKE_MAP_BASH + FAKE_SYM_OFFSET1
#define FAKE_IP_BASH_XMALLOC FAKE_MAP_BASH + FAKE_SYM_OFFSET2
#define FAKE_IP_BASH_XFREE FAKE_MAP_BASH + FAKE_SYM_OFFSET3
#define FAKE_IP_LIBC_MALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET1
#define FAKE_IP_LIBC_FREE FAKE_MAP_LIBC + FAKE_SYM_OFFSET2
#define FAKE_IP_LIBC_REALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET3
#define FAKE_IP_KERNEL_SCHEDULE FAKE_MAP_KERNEL + FAKE_SYM_OFFSET1
#define FAKE_IP_KERNEL_PAGE_FAULT FAKE_MAP_KERNEL + FAKE_SYM_OFFSET2
#define FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN FAKE_MAP_KERNEL + FAKE_SYM_OFFSET3
/*
* The setup_fake_machine() provides a test environment which consists
* of 3 processes that have 3 mappings and in turn, have 3 symbols
......@@ -13,7 +41,7 @@ struct machines;
* ............. ............. ...................
* perf: 100 perf main
* perf: 100 perf run_command
* perf: 100 perf comd_record
* perf: 100 perf cmd_record
* perf: 100 libc malloc
* perf: 100 libc free
* perf: 100 libc realloc
......@@ -22,7 +50,7 @@ struct machines;
* perf: 100 [kernel] sys_perf_event_open
* perf: 200 perf main
* perf: 200 perf run_command
* perf: 200 perf comd_record
* perf: 200 perf cmd_record
* perf: 200 libc malloc
* perf: 200 libc free
* perf: 200 libc realloc
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -45,6 +45,7 @@ int test__hists_filter(void);
int test__mmap_thread_lookup(void);
int test__thread_mg_share(void);
int test__hists_output(void);
int test__hists_cumulate(void);
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
......
......@@ -194,7 +194,7 @@ int ui_browser__warning(struct ui_browser *browser, int timeout,
ui_helpline__vpush(format, args);
va_end(args);
} else {
while ((key == ui__question_window("Warning!", text,
while ((key = ui__question_window("Warning!", text,
"Press any key...",
timeout)) == K_RESIZE)
ui_browser__handle_resize(browser);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -109,6 +109,7 @@ struct symbol_conf {
show_nr_samples,
show_total_period,
use_callchain,
cumulate_callchain,
exclude_other,
show_cpu_utilization,
initialized,
......