Commit b75d3886 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Four tooling fixes, two kprobes KASAN related fixes and an x86 PMU
  driver fix/cleanup"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf jit: Fix build issue on Ubuntu
  perf jevents: Handle events including .c and .o
  perf/x86/intel: Remove an inconsistent NULL check
  kprobes: Unpoison stack in jprobe_return() for KASAN
  kprobes: Avoid false KASAN reports during stack copy
  perf header: Set nr_numa_nodes only when we parsed all the data
  perf top: Fix refreshing hierarchy entries on TUI
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
 
 #ifdef CONFIG_KASAN
        mov     x0, sp
-       bl      kasan_unpoison_remaining_stack
+       bl      kasan_unpoison_task_stack_below
 #endif
 
        ldp     x19, x20, [x29, #16]
...
@@ -458,8 +458,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
        if (!x86_pmu.lbr_nr)
                return;
 
-       if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
-           event->ctx->task_ctx_data) {
+       if (branch_user_callstack(cpuc->br_sel) &&
+           event->ctx->task_ctx_data) {
                task_ctx = event->ctx->task_ctx_data;
                task_ctx->lbr_callstack_users--;
        }
...
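As background on the "inconsistent NULL check" wording in the pull summary: static checkers such as Smatch flag a pointer that is NULL-tested on one path but dereferenced without a test on a matching path, since either the test is dead code or the unchecked path is the real bug. A minimal self-contained sketch of that pattern, with made-up names rather than the kernel structures:

#include <stdio.h>

struct ctx { int *task_ctx_data; };
struct evt { struct ctx *ctx; };

/* One path NULL-checks e->ctx ... */
static void del_path(struct evt *e)
{
        if (e->ctx && e->ctx->task_ctx_data)
                printf("del: users--\n");
}

/*
 * ... while the matching path dereferences it unchecked.  Either the check
 * above is dead code or this function is the real bug; the two should agree.
 */
static void add_path(struct evt *e)
{
        if (e->ctx->task_ctx_data)
                printf("add: users++\n");
}

int main(void)
{
        static int data = 1;
        struct ctx c = { .task_ctx_data = &data };
        struct evt e = { .ctx = &c };

        add_path(&e);
        del_path(&e);
        return 0;
}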
@@ -50,6 +50,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
 #include <linux/frame.h>
+#include <linux/kasan.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
         * tailcall optimization. So, to be absolutely safe
         * we also save and restore enough stack bytes to cover
         * the argument area.
+        * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+        * raw stack chunk with redzones:
         */
-       memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
-              MIN_STACK_SIZE(addr));
+       __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@ void jprobe_return(void)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
+       /* Unpoison stack redzones in the frames we are going to jump over. */
+       kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
+
        asm volatile (
 #ifdef CONFIG_X86_64
                "       xchg    %%rbx,%%rsp     \n"
@@ -1118,7 +1123,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
                /* It's OK to start function graph tracing again */
                unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+               __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
...
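All three kprobes changes above address the same problem: the jprobe code saves, restores, and jumps across a raw chunk of stack that KASAN has poisoned with redzones, so the instrumented memcpy() (and the stale poison left behind when jprobe_return() bypasses normal returns) produces false reports. Below is a toy user-space model of that situation, an illustration under stated assumptions rather than kernel code: a "checked" copy consults a shadow array the way KASAN's memcpy() does, a "raw" copy does not, and only the raw copy can safely save a region that legitimately contains a redzone.

#include <stdio.h>
#include <string.h>

#define REGION 32

static unsigned char shadow[REGION];    /* 0 = addressable, 1 = redzone */

/* Stand-in for the KASAN-instrumented memcpy(): checks shadow per byte. */
static void checked_memcpy(void *dst, const unsigned char *region, size_t len)
{
        for (size_t i = 0; i < len; i++)
                if (shadow[i])
                        printf("BUG: would report out-of-bounds at offset %zu\n", i);
        memcpy(dst, region, len);
}

/* Stand-in for __memcpy(): the same copy with no shadow checks. */
static void raw_memcpy(void *dst, const unsigned char *region, size_t len)
{
        memcpy(dst, region, len);
}

int main(void)
{
        unsigned char stack[REGION] = { 0 }, saved[REGION];

        shadow[8] = 1;  /* a redzone inside the chunk being saved */

        raw_memcpy(saved, stack, REGION);       /* what the fix does: silent */
        checked_memcpy(saved, stack, REGION);   /* what memcpy() would do */
        return 0;
}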
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);
 
 void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
 
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
 
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
...
@@ -34,6 +34,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
+#include <linux/bug.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -62,7 +63,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
        }
 }
 
-static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
 {
        void *base = task_stack_page(task);
        size_t size = sp - base;
@@ -77,9 +78,24 @@ void kasan_unpoison_task_stack(struct task_struct *task)
 }
 
 /* Unpoison the stack for the current task beyond a watermark sp value. */
-asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 {
-       __kasan_unpoison_stack(current, sp);
+       __kasan_unpoison_stack(current, watermark);
+}
+
+/*
+ * Clear all poison for the region between the current SP and a provided
+ * watermark value, as is sometimes required prior to hand-crafted asm function
+ * returns in the middle of functions.
+ */
+void kasan_unpoison_stack_above_sp_to(const void *watermark)
+{
+       const void *sp = __builtin_frame_address(0);
+       size_t size = watermark - sp;
+
+       if (WARN_ON(sp > watermark))
+               return;
+       kasan_unpoison_shadow(sp, size);
 }
 
 /*
...
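kasan_unpoison_task_stack_below() keeps the old behaviour under a clearer name, while kasan_unpoison_stack_above_sp_to() is the new helper used by jprobe_return() above: it clears the poison between the caller's current frame and a previously recorded, higher watermark, because the frames in between are about to be jumped over rather than unwound. A usage sketch under that assumption (the caller and its names are hypothetical; only kasan_unpoison_stack_above_sp_to() comes from this change):

#include <linux/kasan.h>
#include <linux/ptrace.h>

/* Hypothetical slot recording how deep the stack was when control was taken. */
static const void *saved_sp;

static void divert_entry(struct pt_regs *regs)
{
        /* The stack grows down: regs->sp is the high-address watermark to keep. */
        saved_sp = (const void *)regs->sp;
}

static void divert_return(void)
{
        /*
         * The frames between the current SP and saved_sp will be skipped by
         * hand-written asm instead of returning normally, so their KASAN
         * redzones would stay poisoned; clear them first.
         */
        kasan_unpoison_stack_above_sp_to(saved_sp);

        /* ... asm that restores saved_sp and branches back ... */
}

The WARN_ON(sp > watermark) in the implementation guards against a caller passing a watermark below the current frame, in which case there is nothing valid to unpoison.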
@@ -36,7 +36,7 @@ SOLIBEXT=so
 # The following works at least on fedora 23, you may need the next
 # line for other distros.
 ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
 else
 ifneq (,$(wildcard /usr/sbin/alternatives))
 JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
...
@@ -601,7 +601,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
                        u64 nr_entries;
                        hbt->timer(hbt->arg);
 
-                       if (hist_browser__has_filter(browser))
+                       if (hist_browser__has_filter(browser) ||
+                           symbol_conf.report_hierarchy)
                                hist_browser__update_nr_entries(browser);
 
                        nr_entries = hist_browser__nr_entries(browser);
...
@@ -1895,7 +1895,6 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_numa_nodes = nr;
        nodes = zalloc(sizeof(*nodes) * nr);
        if (!nodes)
                return -ENOMEM;
@@ -1932,6 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unused,
 
                free(str);
        }
+       ph->env.nr_numa_nodes = nr;
        ph->env.numa_nodes = nodes;
        return 0;
 
...
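The header fix above follows a general rule: publish a count only together with the data it describes. Before the change, ph->env.nr_numa_nodes was set before the parse loop, so an error return part-way through left the environment advertising nodes that ph->env.numa_nodes never received. A small stand-alone sketch of the pattern, with hypothetical names rather than the perf structures:

#include <stdlib.h>

struct env {
        int   nr;
        long *nodes;
};

/* Parse `nr` entries; only touch *out on full success. */
static int parse_nodes(struct env *out, int nr)
{
        long *nodes = calloc(nr, sizeof(*nodes));

        if (!nodes)
                return -1;

        for (int i = 0; i < nr; i++) {
                /* pretend to parse entry i; bail out on bad input */
                if (/* parse error */ 0) {
                        free(nodes);
                        return -1;      /* out->nr stays 0, out->nodes stays NULL */
                }
                nodes[i] = i;
        }

        /* Publish count and data together, only once both are valid. */
        out->nr = nr;
        out->nodes = nodes;
        return 0;
}

int main(void)
{
        struct env env = { 0 };
        int err = parse_nodes(&env, 4);

        free(env.nodes);
        return err ? 1 : 0;
}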
@@ -136,8 +136,8 @@ do { \
 group          [^,{}/]*[{][^}]*[}][^,{}/]*
 event_pmu      [^,{}/]+[/][^/]*[/][^,{}/]*
 event          [^,{}/]+
-bpf_object     .*\.(o|bpf)
-bpf_source     .*\.c
+bpf_object     [^,{}]+\.(o|bpf)
+bpf_source     [^,{}]+\.c
 
 num_dec                [0-9]+
 num_hex                0x[a-fA-F0-9]+
...
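The effect of the lexer change above is that a BPF object or source token can no longer extend across ',', '{' or '}', so it stays confined to a single element of an event list or group. The difference is easy to see with POSIX extended regexes (regcomp() here is only a stand-in for flex, and the input string is made up):

#include <regex.h>
#include <stdio.h>

static void try(const char *name, const char *pattern, const char *input)
{
        regex_t    re;
        regmatch_t m;

        if (regcomp(&re, pattern, REG_EXTENDED) != 0)
                return;
        if (regexec(&re, input, 1, &m, 0) == 0)
                printf("%-3s pattern matches \"%.*s\"\n", name,
                       (int)(m.rm_eo - m.rm_so), input + m.rm_so);
        else
                printf("%-3s pattern finds no match at the start\n", name);
        regfree(&re);
}

int main(void)
{
        /* An event list in which one element merely ends in ".c". */
        const char *input = "cycles,prog.c";

        try("old", "^.*\\.c", input);           /* matches "cycles,prog.c" */
        try("new", "^[^,{}]+\\.c", input);      /* no match: cannot cross ',' */
        return 0;
}

The greedy old pattern consumes the whole list as a single file name, while the new one finds nothing at the start of "cycles" and leaves it to the ordinary event rules; "prog.c" is still recognized once the lexer reaches it after the comma.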