提交 38f0b33e 编写于 作者: L Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A larger set of updates for perf.

  Kernel:

   - Handle the SBOX uncore monitoring correctly on Broadwell CPUs which
     do not have SBOX.

   - Store context switch out type in PERF_RECORD_SWITCH[_CPU_WIDE]. The
     percentage of preempting and non-preempting context switches help
     understanding the nature of workloads (CPU or IO bound) that are
     running on a machine. This adds the kernel facility and userspace
     changes needed to show this information in 'perf script' and 'perf
     report -D' (Alexey Budankov)

   - Remove a WARN_ON() in the trace/kprobes code which is pointless
     because the return error code is already telling the caller what's
     wrong.

   - Revert a fugly workaround for clang BPF targets.

   - Fix sample_max_stack maximum check and do not proceed when an error
     has been detected, return the error to avoid misidentifying errors
     (Jiri Olsa)

   - Add SPDX identifiers and get rid of GPL boilerplate.

  Tools:

   - Synchronize kernel ABI headers, v4.17-rc1 (Ingo Molnar)

   - Support MAP_FIXED_NOREPLACE, noticed when updating the
     tools/include/ copies (Arnaldo Carvalho de Melo)

   - Add '\n' at the end of parse-options error messages (Ravi Bangoria)

   - Add s390 support for detailed/verbose PMU event description (Thomas
     Richter)

   - perf annotate fixes and improvements:

      * Allow showing offsets in more than just jump targets, use the
        new 'O' hotkey in the TUI, config ~/.perfconfig
        annotate.offset_level for it and for --stdio2 (Arnaldo Carvalho
        de Melo)

      * Use the resolved variable names from objdump disassembled lines
        to make them more compact, just like was already done for some
        instructions, like "mov", this eventually will be done more
        generally, but lets now add some more to the existing mechanism
        (Arnaldo Carvalho de Melo)

   - perf record fixes:

      * Change warning for missing topology sysfs entry to debug, as not
        all architectures have those files, s390 being one of those
        (Thomas Richter)

      * Remove old error messages about things that are unlikely to be
        the root cause in modern systems (Andi Kleen)

   - perf sched fixes:

      * Fix -g/--call-graph documentation (Takuya Yamamoto)

   - perf stat:

      * Enable 1ms interval for printing event counter values
        (Alexey Budankov)

   - perf test fixes:

      * Run dwarf unwind on arm32 (Kim Phillips)

      * Remove unused ptrace.h include from LLVM test, sidestepping older
        clang's lack of support for some asm constructs (Arnaldo
        Carvalho de Melo)

      * Fixup BPF test using epoll_pwait syscall function probe, to cope
        with the syscall routines renames performed in this development
        cycle (Arnaldo Carvalho de Melo)

   - perf version fixes:

      * Do not print info about HAVE_LIBAUDIT_SUPPORT in 'perf version
        --build-options' when HAVE_SYSCALL_TABLE_SUPPORT is true, as
        libaudit won't be used in that case, print info about
        syscall_table support instead (Jin Yao)

   - Build system fixes:

      * Use HAVE_..._SUPPORT used consistently (Jin Yao)

      * Restore READ_ONCE() C++ compatibility in tools/include (Mark
        Rutland)

      * Give hints about package names needed to build jvmti (Arnaldo
        Carvalho de Melo)"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  perf/x86/intel/uncore: Fix SBOX support for Broadwell CPUs
  perf/x86/intel/uncore: Revert "Remove SBOX support for Broadwell server"
  coresight: Move to SPDX identifier
  perf test BPF: Fixup BPF test using epoll_pwait syscall function probe
  perf tests mmap: Show which tracepoint is failing
  perf tools: Add '\n' at the end of parse-options error messages
  perf record: Remove suggestion to enable APIC
  perf record: Remove misleading error suggestion
  perf hists browser: Clarify top/report browser help
  perf mem: Allow all record/report options
  perf trace: Support MAP_FIXED_NOREPLACE
  perf: Remove superfluous allocation error check
  perf: Fix sample_max_stack maximum check
  perf: Return proper values for user stack errors
  perf list: Add s390 support for detailed/verbose PMU event description
  perf script: Extend misc field decoding with switch out event type
  perf report: Extend raw dump (-D) out with switch out event type
  perf/core: Store context switch out type in PERF_RECORD_SWITCH[_CPU_WIDE]
  tools/headers: Synchronize kernel ABI headers, v4.17-rc1
  trace_kprobe: Remove warning message "Could not insert probe at..."
  ...
...@@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = { ...@@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group, .format_group = &hswep_uncore_cbox_format_group,
}; };
static struct intel_uncore_type bdx_uncore_sbox = {
.name = "sbox",
.num_counters = 4,
.num_boxes = 4,
.perf_ctr_bits = 48,
.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
.ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};
#define BDX_MSR_UNCORE_SBOX 3
static struct intel_uncore_type *bdx_msr_uncores[] = { static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox, &bdx_uncore_ubox,
&bdx_uncore_cbox, &bdx_uncore_cbox,
&hswep_uncore_pcu, &hswep_uncore_pcu,
&bdx_uncore_sbox,
NULL, NULL,
}; };
...@@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { ...@@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
void bdx_uncore_cpu_init(void) void bdx_uncore_cpu_init(void)
{ {
int pkg = topology_phys_to_logical_pkg(0);
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores; uncore_msr_uncores = bdx_msr_uncores;
/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86) {
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
/* Detect systems with no SBOXes */
} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
struct pci_dev *pdev;
u32 capid4;
pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
pci_read_config_dword(pdev, 0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
} }
...@@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { ...@@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
}, },
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
HSWEP_PCI_PCU_3),
},
{ /* end: all zeroes */ } { /* end: all zeroes */ }
}; };
......
...@@ -136,7 +136,6 @@ ...@@ -136,7 +136,6 @@
#endif #endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifndef __BPF__
/* /*
* This output constraint should be used for any inline asm which has a "call" * This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer * instruction. Otherwise the asm may be inserted before the frame pointer
...@@ -146,6 +145,5 @@ ...@@ -146,6 +145,5 @@
register unsigned long current_stack_pointer asm(_ASM_SP); register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif #endif
#endif
#endif /* _ASM_X86_ASM_H */ #endif /* _ASM_X86_ASM_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _LINUX_CORESIGHT_PMU_H #ifndef _LINUX_CORESIGHT_PMU_H
......
...@@ -650,11 +650,23 @@ struct perf_event_mmap_page { ...@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/* /*
* Indicates that the content of PERF_SAMPLE_IP points to * These PERF_RECORD_MISC_* flags below are safely reused
* the actual instruction that triggered the event. See also * for the following events:
* perf_event_attr::precise_ip. *
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
*
*
* PERF_RECORD_MISC_EXACT_IP:
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
* perf_event_attr::precise_ip.
*
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
* Indicates that thread was preempted in TASK_RUNNING state.
*/ */
#define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
/* /*
* Reserve the last bit to indicate some extended misc field * Reserve the last bit to indicate some extended misc field
*/ */
......
...@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack) ...@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
goto exit; goto exit;
} }
if (count > 1) { /*
/* If the allocation failed, give up */ * If requesting per event more than the global cap,
if (!callchain_cpus_entries) * return a different error to help userspace figure
err = -ENOMEM; * this out.
/* *
* If requesting per event more than the global cap, * And also do it here so that we have &callchain_mutex held.
* return a different error to help userspace figure */
* this out. if (event_max_stack > sysctl_perf_event_max_stack) {
* err = -EOVERFLOW;
* And also do it here so that we have &callchain_mutex held.
*/
if (event_max_stack > sysctl_perf_event_max_stack)
err = -EOVERFLOW;
goto exit; goto exit;
} }
err = alloc_callchain_buffers(); if (count == 1)
err = alloc_callchain_buffers();
exit: exit:
if (err) if (err)
atomic_dec(&nr_callchain_events); atomic_dec(&nr_callchain_events);
......
...@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task, ...@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
}, },
}; };
if (!sched_in && task->state == TASK_RUNNING)
switch_event.event_id.header.misc |=
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
perf_iterate_sb(perf_event_switch_output, perf_iterate_sb(perf_event_switch_output,
&switch_event, &switch_event,
NULL); NULL);
...@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, ...@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit. * __u16 sample size limit.
*/ */
if (attr->sample_stack_user >= USHRT_MAX) if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL; return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL; return -EINVAL;
} }
if (!attr->sample_max_stack) if (!attr->sample_max_stack)
......
...@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) ...@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
if (ret == 0) if (ret == 0)
tk->tp.flags |= TP_FLAG_REGISTERED; tk->tp.flags |= TP_FLAG_REGISTERED;
else { else {
pr_warn("Could not insert probe at %s+%lu: %d\n",
trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
ret = 0; ret = 0;
......
...@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot { ...@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_CRM_SHIFT 7 #define KVM_REG_ARM_CRM_SHIFT 7
#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
#define KVM_REG_ARM_32_CRN_SHIFT 11 #define KVM_REG_ARM_32_CRN_SHIFT 11
/*
* For KVM currently all guest registers are nonsecure, but we reserve a bit
* in the encoding to distinguish secure from nonsecure for AArch32 system
* registers that are banked by security. This is 1 for the secure banked
* register, and 0 for the nonsecure banked register or if the register is
* not banked by security.
*/
#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
#define KVM_REG_ARM_SECURE_SHIFT 28
#define ARM_CP15_REG_SHIFT_MASK(x,n) \ #define ARM_CP15_REG_SHIFT_MASK(x,n) \
(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
......
...@@ -53,12 +53,6 @@ ...@@ -53,12 +53,6 @@
# define NEED_MOVBE 0 # define NEED_MOVBE 0
#endif #endif
#ifdef CONFIG_X86_5LEVEL
# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31))
#else
# define NEED_LA57 0
#endif
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */ /* Paravirtualized systems may not have PSE or PGE available */
...@@ -104,7 +98,7 @@ ...@@ -104,7 +98,7 @@
#define REQUIRED_MASK13 0 #define REQUIRED_MASK13 0
#define REQUIRED_MASK14 0 #define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0 #define REQUIRED_MASK15 0
#define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0 #define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0 #define REQUIRED_MASK18 0
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
......
...@@ -354,8 +354,25 @@ struct kvm_xcrs { ...@@ -354,8 +354,25 @@ struct kvm_xcrs {
__u64 padding[16]; __u64 padding[16];
}; };
/* definition of registers in kvm_run */ #define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
#define KVM_SYNC_X86_VALID_FIELDS \
(KVM_SYNC_X86_REGS| \
KVM_SYNC_X86_SREGS| \
KVM_SYNC_X86_EVENTS)
/* kvm_sync_regs struct included by kvm_run struct */
struct kvm_sync_regs { struct kvm_sync_regs {
/* Members of this structure are potentially malicious.
* Care must be taken by code reading, esp. interpreting,
* data fields from them inside KVM to prevent TOCTOU and
* double-fetch types of vulnerabilities.
*/
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu_events events;
}; };
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
......
...@@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s ...@@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* required ordering. * required ordering.
*/ */
#define READ_ONCE(x) \ #define READ_ONCE(x) \
({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) ({ \
union { typeof(x) __val; char __c[1]; } __u = \
#define WRITE_ONCE(x, val) \ { .__c = { 0 } }; \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) __read_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
{ .__val = (val) }; \
__write_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
#ifndef __fallthrough #ifndef __fallthrough
......
/* SPDX-License-Identifier: GPL-2.0 */
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef _LINUX_CORESIGHT_PMU_H #ifndef _LINUX_CORESIGHT_PMU_H
......
...@@ -27,6 +27,9 @@ ...@@ -27,6 +27,9 @@
# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
#endif #endif
/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
/* /*
* Flags for mlock * Flags for mlock
*/ */
......
...@@ -864,6 +864,7 @@ enum bpf_func_id { ...@@ -864,6 +864,7 @@ enum bpf_func_id {
/* BPF_FUNC_skb_set_tunnel_key flags. */ /* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2) #define BPF_F_DONT_FRAGMENT (1ULL << 2)
#define BPF_F_SEQ_NUMBER (1ULL << 3)
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags. * BPF_FUNC_perf_event_read_value flags.
......
...@@ -941,4 +941,43 @@ enum { ...@@ -941,4 +941,43 @@ enum {
IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */
}; };
/* tun section */
enum {
IFLA_TUN_UNSPEC,
IFLA_TUN_OWNER,
IFLA_TUN_GROUP,
IFLA_TUN_TYPE,
IFLA_TUN_PI,
IFLA_TUN_VNET_HDR,
IFLA_TUN_PERSIST,
IFLA_TUN_MULTI_QUEUE,
IFLA_TUN_NUM_QUEUES,
IFLA_TUN_NUM_DISABLED_QUEUES,
__IFLA_TUN_MAX,
};
#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)
/* rmnet section */
#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
enum {
IFLA_RMNET_UNSPEC,
IFLA_RMNET_MUX_ID,
IFLA_RMNET_FLAGS,
__IFLA_RMNET_MAX,
};
#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1)
struct ifla_rmnet_flags {
__u32 flags;
__u32 mask;
};
#endif /* _UAPI_LINUX_IF_LINK_H */ #endif /* _UAPI_LINUX_IF_LINK_H */
...@@ -396,6 +396,10 @@ struct kvm_run { ...@@ -396,6 +396,10 @@ struct kvm_run {
char padding[256]; char padding[256];
}; };
/* 2048 is the size of the char array used to bound/pad the size
* of the union that holds sync regs.
*/
#define SYNC_REGS_SIZE_BYTES 2048
/* /*
* shared registers between kvm and userspace. * shared registers between kvm and userspace.
* kvm_valid_regs specifies the register classes set by the host * kvm_valid_regs specifies the register classes set by the host
...@@ -407,7 +411,7 @@ struct kvm_run { ...@@ -407,7 +411,7 @@ struct kvm_run {
__u64 kvm_dirty_regs; __u64 kvm_dirty_regs;
union { union {
struct kvm_sync_regs regs; struct kvm_sync_regs regs;
char padding[2048]; char padding[SYNC_REGS_SIZE_BYTES];
} s; } s;
}; };
...@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt { ...@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_GET_CPU_CHAR 151 #define KVM_CAP_PPC_GET_CPU_CHAR 151
#define KVM_CAP_S390_BPB 152 #define KVM_CAP_S390_BPB 152
#define KVM_CAP_GET_MSR_FEATURES 153 #define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#ifdef KVM_CAP_IRQ_ROUTING #ifdef KVM_CAP_IRQ_ROUTING
...@@ -1375,6 +1380,10 @@ struct kvm_enc_region { ...@@ -1375,6 +1380,10 @@ struct kvm_enc_region {
#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
/* Available with KVM_CAP_HYPERV_EVENTFD */
#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
/* Secure Encrypted Virtualization command */ /* Secure Encrypted Virtualization command */
enum sev_cmd_id { enum sev_cmd_id {
/* Guest initialization commands */ /* Guest initialization commands */
...@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry { ...@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry {
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1) #define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2) #define KVM_ARM_DEV_PMU (1 << 2)
struct kvm_hyperv_eventfd {
__u32 conn_id;
__s32 fd;
__u32 flags;
__u32 padding[3];
};
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
#endif /* __LINUX_KVM_H */ #endif /* __LINUX_KVM_H */
...@@ -650,11 +650,23 @@ struct perf_event_mmap_page { ...@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/* /*
* Indicates that the content of PERF_SAMPLE_IP points to * These PERF_RECORD_MISC_* flags below are safely reused
* the actual instruction that triggered the event. See also * for the following events:
* perf_event_attr::precise_ip. *
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
*
*
* PERF_RECORD_MISC_EXACT_IP:
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
* perf_event_attr::precise_ip.
*
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
* Indicates that thread was preempted in TASK_RUNNING state.
*/ */
#define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
/* /*
* Reserve the last bit to indicate some extended misc field * Reserve the last bit to indicate some extended misc field
*/ */
......
...@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t; ...@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8
#ifdef SNDRV_LITTLE_ENDIAN #ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
......
...@@ -433,7 +433,7 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, ...@@ -433,7 +433,7 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
if (ambiguous_option) { if (ambiguous_option) {
fprintf(stderr, fprintf(stderr,
" Error: Ambiguous option: %s (could be --%s%s or --%s%s)", " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n",
arg, arg,
(ambiguous_flags & OPT_UNSET) ? "no-" : "", (ambiguous_flags & OPT_UNSET) ? "no-" : "",
ambiguous_option->long_name, ambiguous_option->long_name,
...@@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options) ...@@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options)
return; return;
if (strstarts(arg, "no-")) { if (strstarts(arg, "no-")) {
fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
exit(129); exit(129);
} }
...@@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options) ...@@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options)
if (!options->long_name) if (!options->long_name)
continue; continue;
if (strstarts(options->long_name, arg)) { if (strstarts(options->long_name, arg)) {
fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
exit(129); exit(129);
} }
} }
......
...@@ -334,6 +334,11 @@ annotate.*:: ...@@ -334,6 +334,11 @@ annotate.*::
99.93 │ mov %eax,%eax 99.93 │ mov %eax,%eax
annotate.offset_level::
Default is '1', meaning just jump targets will have offsets show right beside
the instruction. When set to '2' 'call' instructions will also have its offsets
shown, 3 or higher will show offsets for all instructions.
hist.*:: hist.*::
hist.percentage:: hist.percentage::
This option control the way to calculate overhead of filtered entries - This option control the way to calculate overhead of filtered entries -
......
...@@ -67,6 +67,9 @@ OPTIONS ...@@ -67,6 +67,9 @@ OPTIONS
--phys-data:: --phys-data::
Record/Report sample physical addresses Record/Report sample physical addresses
In addition, for report all perf report options are valid, and for record
all perf record options.
SEE ALSO SEE ALSO
-------- --------
linkperf:perf-record[1], linkperf:perf-report[1] linkperf:perf-record[1], linkperf:perf-report[1]
...@@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist' ...@@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist'
kallsyms pathname kallsyms pathname
-g:: -g::
--no-call-graph:: --call-graph::
Do not display call chains if present. Display call chains if present (default on).
--max-stack:: --max-stack::
Maximum number of functions to display in backtrace, default 5. Maximum number of functions to display in backtrace, default 5.
......
...@@ -228,14 +228,15 @@ OPTIONS ...@@ -228,14 +228,15 @@ OPTIONS
For sample events it's possible to display misc field with -F +misc option, For sample events it's possible to display misc field with -F +misc option,
following letters are displayed for each bit: following letters are displayed for each bit:
PERF_RECORD_MISC_KERNEL K PERF_RECORD_MISC_KERNEL K
PERF_RECORD_MISC_USER U PERF_RECORD_MISC_USER U
PERF_RECORD_MISC_HYPERVISOR H PERF_RECORD_MISC_HYPERVISOR H
PERF_RECORD_MISC_GUEST_KERNEL G PERF_RECORD_MISC_GUEST_KERNEL G
PERF_RECORD_MISC_GUEST_USER g PERF_RECORD_MISC_GUEST_USER g
PERF_RECORD_MISC_MMAP_DATA* M PERF_RECORD_MISC_MMAP_DATA* M
PERF_RECORD_MISC_COMM_EXEC E PERF_RECORD_MISC_COMM_EXEC E
PERF_RECORD_MISC_SWITCH_OUT S PERF_RECORD_MISC_SWITCH_OUT S
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp
$ perf script -F +misc ... $ perf script -F +misc ...
sched-messaging 1414 K 28690.636582: 4590 cycles ... sched-messaging 1414 K 28690.636582: 4590 cycles ...
......
...@@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m ...@@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m
-I msecs:: -I msecs::
--interval-print msecs:: --interval-print msecs::
Print count deltas every N milliseconds (minimum: 10ms) Print count deltas every N milliseconds (minimum: 1ms)
The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
example: 'perf stat -I 1000 -e cycles -a sleep 5' example: 'perf stat -I 1000 -e cycles -a sleep 5'
......
...@@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0) ...@@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0)
endif endif
ifneq ($(NO_SYSCALL_TABLE),1) ifneq ($(NO_SYSCALL_TABLE),1)
CFLAGS += -DHAVE_SYSCALL_TABLE CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
endif endif
# So far there's only x86 and arm libdw unwind support merged in perf. # So far there's only x86 and arm libdw unwind support merged in perf.
...@@ -847,7 +847,7 @@ ifndef NO_JVMTI ...@@ -847,7 +847,7 @@ ifndef NO_JVMTI
ifeq ($(feature-jvmti), 1) ifeq ($(feature-jvmti), 1)
$(call detected_var,JDIR) $(call detected_var,JDIR)
else else
$(warning No openjdk development package found, please install JDK package) $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel)
NO_JVMTI := 1 NO_JVMTI := 1
endif endif
endif endif
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
struct perf_sample;
#endif
extern struct test arch_tests[];
#endif
libperf-y += regs_load.o libperf-y += regs_load.o
libperf-y += dwarf-unwind.o libperf-y += dwarf-unwind.o
libperf-y += arch-tests.o
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include "tests/tests.h"
#include "arch-tests.h"
struct test arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
{
.desc = "DWARF unwind",
.func = test__dwarf_unwind,
},
#endif
{
.func = NULL,
},
};
// SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <stdbool.h> #include <stdbool.h>
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <api/fs/fs.h> #include <api/fs/fs.h>
......
/* SPDX-License-Identifier: GPL-2.0 */
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef INCLUDE__PERF_CS_ETM_H__ #ifndef INCLUDE__PERF_CS_ETM_H__
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <string.h> #include <string.h>
......
...@@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') ...@@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header): $(sys)/syscall_64.tbl $(systbl) $(header): $(sys)/syscall_64.tbl $(systbl)
@(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
(diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
|| echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true
$(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
clean:: clean::
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
static struct ins x86__instructions[] = { static struct ins x86__instructions[] = {
{ .name = "adc", .ops = &mov_ops, },
{ .name = "adcb", .ops = &mov_ops, },
{ .name = "adcl", .ops = &mov_ops, },
{ .name = "add", .ops = &mov_ops, }, { .name = "add", .ops = &mov_ops, },
{ .name = "addl", .ops = &mov_ops, }, { .name = "addl", .ops = &mov_ops, },
{ .name = "addq", .ops = &mov_ops, }, { .name = "addq", .ops = &mov_ops, },
{ .name = "addsd", .ops = &mov_ops, },
{ .name = "addw", .ops = &mov_ops, }, { .name = "addw", .ops = &mov_ops, },
{ .name = "and", .ops = &mov_ops, }, { .name = "and", .ops = &mov_ops, },
{ .name = "andb", .ops = &mov_ops, },
{ .name = "andl", .ops = &mov_ops, },
{ .name = "andpd", .ops = &mov_ops, },
{ .name = "andps", .ops = &mov_ops, },
{ .name = "andq", .ops = &mov_ops, },
{ .name = "andw", .ops = &mov_ops, },
{ .name = "bsr", .ops = &mov_ops, },
{ .name = "bt", .ops = &mov_ops, },
{ .name = "btr", .ops = &mov_ops, },
{ .name = "bts", .ops = &mov_ops, }, { .name = "bts", .ops = &mov_ops, },
{ .name = "btsq", .ops = &mov_ops, },
{ .name = "call", .ops = &call_ops, }, { .name = "call", .ops = &call_ops, },
{ .name = "callq", .ops = &call_ops, }, { .name = "callq", .ops = &call_ops, },
{ .name = "cmovbe", .ops = &mov_ops, },
{ .name = "cmove", .ops = &mov_ops, },
{ .name = "cmovae", .ops = &mov_ops, },
{ .name = "cmp", .ops = &mov_ops, }, { .name = "cmp", .ops = &mov_ops, },
{ .name = "cmpb", .ops = &mov_ops, }, { .name = "cmpb", .ops = &mov_ops, },
{ .name = "cmpl", .ops = &mov_ops, }, { .name = "cmpl", .ops = &mov_ops, },
{ .name = "cmpq", .ops = &mov_ops, }, { .name = "cmpq", .ops = &mov_ops, },
{ .name = "cmpw", .ops = &mov_ops, }, { .name = "cmpw", .ops = &mov_ops, },
{ .name = "cmpxch", .ops = &mov_ops, }, { .name = "cmpxch", .ops = &mov_ops, },
{ .name = "cmpxchg", .ops = &mov_ops, },
{ .name = "cs", .ops = &mov_ops, },
{ .name = "dec", .ops = &dec_ops, }, { .name = "dec", .ops = &dec_ops, },
{ .name = "decl", .ops = &dec_ops, }, { .name = "decl", .ops = &dec_ops, },
{ .name = "divsd", .ops = &mov_ops, },
{ .name = "divss", .ops = &mov_ops, },
{ .name = "gs", .ops = &mov_ops, },
{ .name = "imul", .ops = &mov_ops, }, { .name = "imul", .ops = &mov_ops, },
{ .name = "inc", .ops = &dec_ops, }, { .name = "inc", .ops = &dec_ops, },
{ .name = "incl", .ops = &dec_ops, }, { .name = "incl", .ops = &dec_ops, },
...@@ -57,25 +79,68 @@ static struct ins x86__instructions[] = { ...@@ -57,25 +79,68 @@ static struct ins x86__instructions[] = {
{ .name = "lea", .ops = &mov_ops, }, { .name = "lea", .ops = &mov_ops, },
{ .name = "lock", .ops = &lock_ops, }, { .name = "lock", .ops = &lock_ops, },
{ .name = "mov", .ops = &mov_ops, }, { .name = "mov", .ops = &mov_ops, },
{ .name = "movapd", .ops = &mov_ops, },
{ .name = "movaps", .ops = &mov_ops, },
{ .name = "movb", .ops = &mov_ops, }, { .name = "movb", .ops = &mov_ops, },
{ .name = "movdqa", .ops = &mov_ops, }, { .name = "movdqa", .ops = &mov_ops, },
{ .name = "movdqu", .ops = &mov_ops, },
{ .name = "movl", .ops = &mov_ops, }, { .name = "movl", .ops = &mov_ops, },
{ .name = "movq", .ops = &mov_ops, }, { .name = "movq", .ops = &mov_ops, },
{ .name = "movsd", .ops = &mov_ops, },
{ .name = "movslq", .ops = &mov_ops, }, { .name = "movslq", .ops = &mov_ops, },
{ .name = "movss", .ops = &mov_ops, },
{ .name = "movupd", .ops = &mov_ops, },
{ .name = "movups", .ops = &mov_ops, },
{ .name = "movw", .ops = &mov_ops, },
{ .name = "movzbl", .ops = &mov_ops, }, { .name = "movzbl", .ops = &mov_ops, },
{ .name = "movzwl", .ops = &mov_ops, }, { .name = "movzwl", .ops = &mov_ops, },
{ .name = "mulsd", .ops = &mov_ops, },
{ .name = "mulss", .ops = &mov_ops, },
{ .name = "nop", .ops = &nop_ops, }, { .name = "nop", .ops = &nop_ops, },
{ .name = "nopl", .ops = &nop_ops, }, { .name = "nopl", .ops = &nop_ops, },
{ .name = "nopw", .ops = &nop_ops, }, { .name = "nopw", .ops = &nop_ops, },
{ .name = "or", .ops = &mov_ops, }, { .name = "or", .ops = &mov_ops, },
{ .name = "orb", .ops = &mov_ops, },
{ .name = "orl", .ops = &mov_ops, }, { .name = "orl", .ops = &mov_ops, },
{ .name = "orps", .ops = &mov_ops, },
{ .name = "orq", .ops = &mov_ops, },
{ .name = "pand", .ops = &mov_ops, },
{ .name = "paddq", .ops = &mov_ops, },
{ .name = "pcmpeqb", .ops = &mov_ops, },
{ .name = "por", .ops = &mov_ops, },
{ .name = "rclb", .ops = &mov_ops, },
{ .name = "rcll", .ops = &mov_ops, },
{ .name = "retq", .ops = &ret_ops, },
{ .name = "sbb", .ops = &mov_ops, },
{ .name = "sbbl", .ops = &mov_ops, },
{ .name = "sete", .ops = &mov_ops, },
{ .name = "sub", .ops = &mov_ops, },
{ .name = "subl", .ops = &mov_ops, },
{ .name = "subq", .ops = &mov_ops, },
{ .name = "subsd", .ops = &mov_ops, },
{ .name = "subw", .ops = &mov_ops, },
{ .name = "test", .ops = &mov_ops, }, { .name = "test", .ops = &mov_ops, },
{ .name = "testb", .ops = &mov_ops, }, { .name = "testb", .ops = &mov_ops, },
{ .name = "testl", .ops = &mov_ops, }, { .name = "testl", .ops = &mov_ops, },
{ .name = "ucomisd", .ops = &mov_ops, },
{ .name = "ucomiss", .ops = &mov_ops, },
{ .name = "vaddsd", .ops = &mov_ops, },
{ .name = "vandpd", .ops = &mov_ops, },
{ .name = "vmovdqa", .ops = &mov_ops, },
{ .name = "vmovq", .ops = &mov_ops, },
{ .name = "vmovsd", .ops = &mov_ops, },
{ .name = "vmulsd", .ops = &mov_ops, },
{ .name = "vorpd", .ops = &mov_ops, },
{ .name = "vsubsd", .ops = &mov_ops, },
{ .name = "vucomisd", .ops = &mov_ops, },
{ .name = "xadd", .ops = &mov_ops, }, { .name = "xadd", .ops = &mov_ops, },
{ .name = "xbeginl", .ops = &jump_ops, }, { .name = "xbeginl", .ops = &jump_ops, },
{ .name = "xbeginq", .ops = &jump_ops, }, { .name = "xbeginq", .ops = &jump_ops, },
{ .name = "retq", .ops = &ret_ops, }, { .name = "xchg", .ops = &mov_ops, },
{ .name = "xor", .ops = &mov_ops, },
{ .name = "xorb", .ops = &mov_ops, },
{ .name = "xorpd", .ops = &mov_ops, },
{ .name = "xorps", .ops = &mov_ops, },
}; };
static bool x86__ins_is_fused(struct arch *arch, const char *ins1, static bool x86__ins_is_fused(struct arch *arch, const char *ins1,
......
...@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv) ...@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv)
#ifdef HAVE_LIBELF_SUPPORT #ifdef HAVE_LIBELF_SUPPORT
"probe", "probe",
#endif #endif
#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
"trace", "trace",
#endif #endif
NULL }; NULL };
......
...@@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) ...@@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
}; };
argc = parse_options(argc, argv, options, record_mem_usage, argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_STOP_AT_NON_OPTION); PARSE_OPT_KEEP_UNKNOWN);
rec_argc = argc + 9; /* max number of arguments */ rec_argc = argc + 9; /* max number of arguments */
rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv = calloc(rec_argc + 1, sizeof(char *));
...@@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv) ...@@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv)
} }
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); mem_usage, PARSE_OPT_KEEP_UNKNOWN);
if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
usage_with_options(mem_usage, mem_options); usage_with_options(mem_usage, mem_options);
......
...@@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample, ...@@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
break; break;
case PERF_RECORD_SWITCH: case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE: case PERF_RECORD_SWITCH_CPU_WIDE:
if (has(SWITCH_OUT)) if (has(SWITCH_OUT)) {
ret += fprintf(fp, "S"); ret += fprintf(fp, "S");
if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
ret += fprintf(fp, "p");
}
default: default:
break; break;
} }
...@@ -2801,11 +2804,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array) ...@@ -2801,11 +2804,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
for_each_lang(scripts_path, scripts_dir, lang_dirent) { for_each_lang(scripts_path, scripts_dir, lang_dirent) {
scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
lang_dirent->d_name); lang_dirent->d_name);
#ifdef NO_LIBPERL #ifndef HAVE_LIBPERL_SUPPORT
if (strstr(lang_path, "perl")) if (strstr(lang_path, "perl"))
continue; continue;
#endif #endif
#ifdef NO_LIBPYTHON #ifndef HAVE_LIBPYTHON_SUPPORT
if (strstr(lang_path, "python")) if (strstr(lang_path, "python"))
continue; continue;
#endif #endif
......
...@@ -1943,7 +1943,8 @@ static const struct option stat_options[] = { ...@@ -1943,7 +1943,8 @@ static const struct option stat_options[] = {
OPT_STRING(0, "post", &post_cmd, "command", OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"), "command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &stat_config.interval, OPT_UINTEGER('I', "interval-print", &stat_config.interval,
"print counts at regular interval in ms (>= 10)"), "print counts at regular interval in ms "
"(overhead is possible for values <= 100ms)"),
OPT_INTEGER(0, "interval-count", &stat_config.times, OPT_INTEGER(0, "interval-count", &stat_config.times,
"print counts for fixed number of times"), "print counts for fixed number of times"),
OPT_UINTEGER(0, "timeout", &stat_config.timeout, OPT_UINTEGER(0, "timeout", &stat_config.timeout,
...@@ -2923,17 +2924,6 @@ int cmd_stat(int argc, const char **argv) ...@@ -2923,17 +2924,6 @@ int cmd_stat(int argc, const char **argv)
} }
} }
if (interval && interval < 100) {
if (interval < 10) {
pr_err("print interval must be >= 10ms\n");
parse_options_usage(stat_usage, stat_options, "I", 1);
goto out;
} else
pr_warning("print interval < 100ms. "
"The overhead percentage could be high in some cases. "
"Please proceed with caution.\n");
}
if (stat_config.times && interval) if (stat_config.times && interval)
interval_count = true; interval_count = true;
else if (stat_config.times && !interval) { else if (stat_config.times && !interval) {
......
...@@ -60,7 +60,10 @@ static void library_status(void) ...@@ -60,7 +60,10 @@ static void library_status(void)
STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
STATUS(HAVE_GLIBC_SUPPORT, glibc); STATUS(HAVE_GLIBC_SUPPORT, glibc);
STATUS(HAVE_GTK2_SUPPORT, gtk2); STATUS(HAVE_GTK2_SUPPORT, gtk2);
#ifndef HAVE_SYSCALL_TABLE_SUPPORT
STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
#endif
STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
STATUS(HAVE_LIBBFD_SUPPORT, libbfd); STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
STATUS(HAVE_LIBELF_SUPPORT, libelf); STATUS(HAVE_LIBELF_SUPPORT, libelf);
STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
......
...@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = { ...@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = {
{ "lock", cmd_lock, 0 }, { "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 }, { "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 }, { "test", cmd_test, 0 },
#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
{ "trace", cmd_trace, 0 }, { "trace", cmd_trace, 0 },
#endif #endif
{ "inject", cmd_inject, 0 }, { "inject", cmd_inject, 0 },
...@@ -491,7 +491,7 @@ int main(int argc, const char **argv) ...@@ -491,7 +491,7 @@ int main(int argc, const char **argv)
argv[0] = cmd; argv[0] = cmd;
} }
if (strstarts(cmd, "trace")) { if (strstarts(cmd, "trace")) {
#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
setup_path(); setup_path();
argv[0] = "trace"; argv[0] = "trace";
return cmd_trace(argc, argv); return cmd_trace(argc, argv);
......
...@@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = { ...@@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = {
.max_entries = 1, .max_entries = 1,
}; };
SEC("func=SyS_epoll_pwait") SEC("func=do_epoll_wait")
int bpf_func__SyS_epoll_pwait(void *ctx) int bpf_func__SyS_epoll_pwait(void *ctx)
{ {
int ind =0; int ind =0;
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#define SEC(NAME) __attribute__((section(NAME), used)) #define SEC(NAME) __attribute__((section(NAME), used))
#include <uapi/linux/fs.h> #include <uapi/linux/fs.h>
#include <uapi/asm/ptrace.h>
SEC("func=vfs_llseek") SEC("func=vfs_llseek")
int bpf_func__vfs_llseek(void *ctx) int bpf_func__vfs_llseek(void *ctx)
......
...@@ -118,6 +118,7 @@ static struct test generic_tests[] = { ...@@ -118,6 +118,7 @@ static struct test generic_tests[] = {
{ {
.desc = "Breakpoint accounting", .desc = "Breakpoint accounting",
.func = test__bp_accounting, .func = test__bp_accounting,
.is_supported = test__bp_signal_is_supported,
}, },
{ {
.desc = "Number of exit events of a simple workload", .desc = "Number of exit events of a simple workload",
......
...@@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse ...@@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
evsels[i] = perf_evsel__newtp("syscalls", name); evsels[i] = perf_evsel__newtp("syscalls", name);
if (IS_ERR(evsels[i])) { if (IS_ERR(evsels[i])) {
pr_debug("perf_evsel__new\n"); pr_debug("perf_evsel__new(%s)\n", name);
goto out_delete_evlist; goto out_delete_evlist;
} }
......
...@@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, ...@@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
P_MMAP_FLAG(EXECUTABLE); P_MMAP_FLAG(EXECUTABLE);
P_MMAP_FLAG(FILE); P_MMAP_FLAG(FILE);
P_MMAP_FLAG(FIXED); P_MMAP_FLAG(FIXED);
#ifdef MAP_FIXED_NOREPLACE
P_MMAP_FLAG(FIXED_NOREPLACE);
#endif
P_MMAP_FLAG(GROWSDOWN); P_MMAP_FLAG(GROWSDOWN);
P_MMAP_FLAG(HUGETLB); P_MMAP_FLAG(HUGETLB);
P_MMAP_FLAG(LOCKED); P_MMAP_FLAG(LOCKED);
......
...@@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser, ...@@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
"J Toggle showing number of jump sources on targets\n" "J Toggle showing number of jump sources on targets\n"
"n Search next string\n" "n Search next string\n"
"o Toggle disassembler output/simplified view\n" "o Toggle disassembler output/simplified view\n"
"O Bump offset level (jump targets -> +call -> all -> cycle thru)\n"
"s Toggle source code view\n" "s Toggle source code view\n"
"t Circulate percent, total period, samples view\n" "t Circulate percent, total period, samples view\n"
"/ Search string\n" "/ Search string\n"
...@@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser, ...@@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser,
notes->options->use_offset = !notes->options->use_offset; notes->options->use_offset = !notes->options->use_offset;
annotation__update_column_widths(notes); annotation__update_column_widths(notes);
continue; continue;
case 'O':
if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
continue;
case 'j': case 'j':
notes->options->jump_arrows = !notes->options->jump_arrows; notes->options->jump_arrows = !notes->options->jump_arrows;
continue; continue;
......
...@@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, ...@@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"h/?/F1 Show this window\n" \ "h/?/F1 Show this window\n" \
"UP/DOWN/PGUP\n" \ "UP/DOWN/PGUP\n" \
"PGDN/SPACE Navigate\n" \ "PGDN/SPACE Navigate\n" \
"q/ESC/CTRL+C Exit browser\n\n" \ "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \
"For multiple event sessions:\n\n" \ "For multiple event sessions:\n\n" \
"TAB/UNTAB Switch events\n\n" \ "TAB/UNTAB Switch events\n\n" \
"For symbolic views (--sort has sym):\n\n" \ "For symbolic views (--sort has sym):\n\n" \
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
struct annotation_options annotation__default_options = { struct annotation_options annotation__default_options = {
.use_offset = true, .use_offset = true,
.jump_arrows = true, .jump_arrows = true,
.offset_level = ANNOTATION__OFFSET_JUMP_TARGETS,
}; };
const char *disassembler_style; const char *disassembler_style;
...@@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati ...@@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (!notes->options->use_offset) { if (!notes->options->use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else { } else {
if (al->jump_sources) { if (al->jump_sources &&
notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
if (notes->options->show_nr_jumps) { if (notes->options->show_nr_jumps) {
int prev; int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ", printed = scnprintf(bf, sizeof(bf), "%*d ",
...@@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati ...@@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
obj__printf(obj, bf); obj__printf(obj, bf);
obj__set_color(obj, prev); obj__set_color(obj, prev);
} }
print_addr:
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
notes->widths.target, addr); notes->widths.target, addr);
} else if (ins__is_call(&disasm_line(al)->ins) &&
notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
goto print_addr;
} else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
goto print_addr;
} else { } else {
printed = scnprintf(bf, sizeof(bf), "%-*s ", printed = scnprintf(bf, sizeof(bf), "%-*s ",
notes->widths.addr, " "); notes->widths.addr, " ");
...@@ -2642,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes, ...@@ -2642,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
*/ */
static struct annotation_config { static struct annotation_config {
const char *name; const char *name;
bool *value; void *value;
} annotation__configs[] = { } annotation__configs[] = {
ANNOTATION__CFG(hide_src_code), ANNOTATION__CFG(hide_src_code),
ANNOTATION__CFG(jump_arrows), ANNOTATION__CFG(jump_arrows),
ANNOTATION__CFG(offset_level),
ANNOTATION__CFG(show_linenr), ANNOTATION__CFG(show_linenr),
ANNOTATION__CFG(show_nr_jumps), ANNOTATION__CFG(show_nr_jumps),
ANNOTATION__CFG(show_nr_samples), ANNOTATION__CFG(show_nr_samples),
...@@ -2677,8 +2685,16 @@ static int annotation__config(const char *var, const char *value, ...@@ -2677,8 +2685,16 @@ static int annotation__config(const char *var, const char *value,
if (cfg == NULL) if (cfg == NULL)
pr_debug("%s variable unknown, ignoring...", var); pr_debug("%s variable unknown, ignoring...", var);
else else if (strcmp(var, "annotate.offset_level") == 0) {
*cfg->value = perf_config_bool(name, value); perf_config_int(cfg->value, name, value);
if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL)
*(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL;
else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL)
*(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL;
} else {
*(bool *)cfg->value = perf_config_bool(name, value);
}
return 0; return 0;
} }
......
...@@ -70,8 +70,17 @@ struct annotation_options { ...@@ -70,8 +70,17 @@ struct annotation_options {
show_nr_jumps, show_nr_jumps,
show_nr_samples, show_nr_samples,
show_total_period; show_total_period;
u8 offset_level;
}; };
enum {
ANNOTATION__OFFSET_JUMP_TARGETS = 1,
ANNOTATION__OFFSET_CALL,
ANNOTATION__MAX_OFFSET_LEVEL,
};
#define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS
extern struct annotation_options annotation__default_options; extern struct annotation_options annotation__default_options;
struct annotation; struct annotation;
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* SPDX-License-Identifier: GPL-2.0
*
* Copyright(C) 2015-2018 Linaro Limited. * Copyright(C) 2015-2018 Linaro Limited.
* *
* Author: Tor Jeremiassen <tor@ti.com> * Author: Tor Jeremiassen <tor@ti.com>
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* SPDX-License-Identifier: GPL-2.0
*
* Copyright(C) 2015-2018 Linaro Limited. * Copyright(C) 2015-2018 Linaro Limited.
* *
* Author: Tor Jeremiassen <tor@ti.com> * Author: Tor Jeremiassen <tor@ti.com>
......
/* SPDX-License-Identifier: GPL-2.0 */
/* /*
* Copyright(C) 2015 Linaro Limited. All rights reserved. * Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
......
...@@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) ...@@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{ {
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
const char *in_out = out ? "OUT" : "IN "; const char *in_out = !out ? "IN " :
!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
"OUT " : "OUT preempt";
if (event->header.type == PERF_RECORD_SWITCH) if (event->header.type == PERF_RECORD_SWITCH)
return fprintf(fp, " %s\n", in_out); return fprintf(fp, " %s\n", in_out);
......
...@@ -2870,8 +2870,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, ...@@ -2870,8 +2870,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
#if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
if (evsel->attr.type == PERF_TYPE_HARDWARE) if (evsel->attr.type == PERF_TYPE_HARDWARE)
return scnprintf(msg, size, "%s", return scnprintf(msg, size, "%s",
"No hardware sampling interrupt available.\n" "No hardware sampling interrupt available.\n");
"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif #endif
break; break;
case EBUSY: case EBUSY:
...@@ -2894,8 +2893,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, ...@@ -2894,8 +2893,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
return scnprintf(msg, size, return scnprintf(msg, size,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg may provide additional information.\n" "/bin/dmesg | grep -i perf may provide additional information.\n",
"No CONFIG_PERF_EVENTS=y kernel support configured?",
err, str_error_r(err, sbuf, sizeof(sbuf)), err, str_error_r(err, sbuf, sizeof(sbuf)),
perf_evsel__name(evsel)); perf_evsel__name(evsel));
} }
......
...@@ -38,7 +38,7 @@ do ...@@ -38,7 +38,7 @@ do
done done
echo "#endif /* HAVE_LIBELF_SUPPORT */" echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort | sort |
while read cmd while read cmd
......
...@@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) ...@@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
dir = opendir(path); dir = opendir(path);
if (!dir) { if (!dir) {
pr_warning("failed: can't open node sysfs data\n"); pr_debug2("%s: could't read %s, does this arch have topology information?\n",
__func__, path);
return -1; return -1;
} }
......
...@@ -562,6 +562,12 @@ static int is_pmu_core(const char *name) ...@@ -562,6 +562,12 @@ static int is_pmu_core(const char *name)
if (stat(path, &st) == 0) if (stat(path, &st) == 0)
return 1; return 1;
/* Look for cpu sysfs (specific to s390) */
scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
sysfs, name);
if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
return 1;
return 0; return 0;
} }
......
...@@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void) ...@@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void)
int symbol__annotation_init(void) int symbol__annotation_init(void)
{ {
if (symbol_conf.init_annotation)
return 0;
if (symbol_conf.initialized) { if (symbol_conf.initialized) {
pr_err("Annotation needs to be init before symbol__init()\n"); pr_err("Annotation needs to be init before symbol__init()\n");
return -1; return -1;
} }
if (symbol_conf.init_annotation) {
pr_warning("Annotation being initialized multiple times\n");
return 0;
}
symbol_conf.priv_size += sizeof(struct annotation); symbol_conf.priv_size += sizeof(struct annotation);
symbol_conf.init_annotation = true; symbol_conf.init_annotation = true;
return 0; return 0;
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <stdlib.h> #include <stdlib.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#ifdef HAVE_SYSCALL_TABLE #ifdef HAVE_SYSCALL_TABLE_SUPPORT
#include <string.h> #include <string.h>
#include "string2.h" #include "string2.h"
#include "util.h" #include "util.h"
...@@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g ...@@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
} }
#else /* HAVE_SYSCALL_TABLE */ #else /* HAVE_SYSCALL_TABLE_SUPPORT */
#include <libaudit.h> #include <libaudit.h>
...@@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g ...@@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
{ {
return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx);
} }
#endif /* HAVE_SYSCALL_TABLE */ #endif /* HAVE_SYSCALL_TABLE_SUPPORT */
...@@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) ...@@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
} }
} }
#ifdef NO_LIBPYTHON #ifndef HAVE_LIBPYTHON_SUPPORT
void setup_python_scripting(void) void setup_python_scripting(void)
{ {
register_python_scripting(&python_scripting_unsupported_ops); register_python_scripting(&python_scripting_unsupported_ops);
...@@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) ...@@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
} }
} }
#ifdef NO_LIBPERL #ifndef HAVE_LIBPERL_SUPPORT
void setup_perl_scripting(void) void setup_perl_scripting(void)
{ {
register_perl_scripting(&perl_scripting_unsupported_ops); register_perl_scripting(&perl_scripting_unsupported_ops);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册