Commit c042f7e9 authored by Ingo Molnar


Merge tag 'perf-urgent-for-mingo-4.17-20180420' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes and improvements from Arnaldo Carvalho de Melo:

- Store context switch out type in PERF_RECORD_SWITCH[_CPU_WIDE].
  The percentage of preempting and non-preempting context switches helps in
  understanding the nature of workloads (CPU or IO bound) running on a
  machine. This adds the kernel facility and userspace changes needed
  to show this information in 'perf script' and 'perf report -D' (Alexey Budankov)

- Remove old error messages about things that are unlikely to be the root cause
  in modern systems (Andi Kleen)

- Synchronize kernel ABI headers, v4.17-rc1 (Ingo Molnar)

- Support MAP_FIXED_NOREPLACE, noticed when updating the tools/include/
  copies (Arnaldo Carvalho de Melo)

- Fixup BPF test using epoll_pwait syscall function probe, to cope with
  the renaming of syscall routines performed in this development cycle (Arnaldo Carvalho de Melo)

- Fix the sample_max_stack maximum check and do not proceed when an error
  has been detected, returning the error to avoid misidentifying it (Jiri Olsa)

- Add '\n' at the end of parse-options error messages (Ravi Bangoria)

- Add s390 support for detailed/verbose PMU event description (Thomas Richter)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _LINUX_CORESIGHT_PMU_H
......
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
 /*
- * Indicates that the content of PERF_SAMPLE_IP points to
- * the actual instruction that triggered the event. See also
- * perf_event_attr::precise_ip.
+ * These PERF_RECORD_MISC_* flags below are safely reused
+ * for the following events:
+ *
+ *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
+ *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ *
+ *
+ * PERF_RECORD_MISC_EXACT_IP:
+ *   Indicates that the content of PERF_SAMPLE_IP points to
+ *   the actual instruction that triggered the event. See also
+ *   perf_event_attr::precise_ip.
+ *
+ * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
+ *   Indicates that thread was preempted in TASK_RUNNING state.
  */
 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
 /*
  * Reserve the last bit to indicate some extended misc field
  */
......
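To make the reused bit concrete, here is a minimal, hypothetical consumer-side sketch (not part of this patch set) showing how a reader of the perf ring buffer could classify PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE records from header.misc; the helper name switch_kind is made up, and the fallback defines only mirror the values shown in the hunk above for builds against older uapi headers.

    /* Sketch only: classify a context-switch record from its misc bits. */
    #include <stdio.h>
    #include <linux/perf_event.h>

    #ifndef PERF_RECORD_MISC_SWITCH_OUT
    #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
    #endif
    #ifndef PERF_RECORD_MISC_SWITCH_OUT_PREEMPT
    #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
    #endif

    static const char *switch_kind(__u16 misc)
    {
    	if (!(misc & PERF_RECORD_MISC_SWITCH_OUT))
    		return "switch in";
    	if (misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
    		return "switch out (preempted while TASK_RUNNING)";
    	return "switch out (voluntary, e.g. blocked on IO)";
    }

    int main(void)
    {
    	printf("%s\n", switch_kind(PERF_RECORD_MISC_SWITCH_OUT |
    				   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT));
    	return 0;
    }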
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
 		goto exit;
 	}
 
-	if (count > 1) {
-		/* If the allocation failed, give up */
-		if (!callchain_cpus_entries)
-			err = -ENOMEM;
-		/*
-		 * If requesting per event more than the global cap,
-		 * return a different error to help userspace figure
-		 * this out.
-		 *
-		 * And also do it here so that we have &callchain_mutex held.
-		 */
-		if (event_max_stack > sysctl_perf_event_max_stack)
-			err = -EOVERFLOW;
+	/*
+	 * If requesting per event more than the global cap,
+	 * return a different error to help userspace figure
+	 * this out.
+	 *
+	 * And also do it here so that we have &callchain_mutex held.
+	 */
+	if (event_max_stack > sysctl_perf_event_max_stack) {
+		err = -EOVERFLOW;
 		goto exit;
 	}
 
-	err = alloc_callchain_buffers();
+	if (count == 1)
+		err = alloc_callchain_buffers();
 exit:
 	if (err)
 		atomic_dec(&nr_callchain_events);
......
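A hedged userspace illustration of the effect of the fix above (not from this patch set): asking for a deeper callchain than the /proc/sys/kernel/perf_event_max_stack cap should now consistently fail with EOVERFLOW instead of the error being lost. The event choice and period below are arbitrary example values.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
    				int cpu, int group_fd, unsigned long flags)
    {
    	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size = sizeof(attr);
    	attr.type = PERF_TYPE_SOFTWARE;
    	attr.config = PERF_COUNT_SW_CPU_CLOCK;
    	attr.sample_type = PERF_SAMPLE_CALLCHAIN;
    	attr.sample_period = 100000;
    	attr.sample_max_stack = 65000;	/* almost surely above the sysctl cap */

    	if (sys_perf_event_open(&attr, 0, -1, -1, 0) < 0)
    		printf("perf_event_open: %s (EOVERFLOW expected)\n",
    		       strerror(errno));
    	return 0;
    }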
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
 		},
 	};
 
+	if (!sched_in && task->state == TASK_RUNNING)
+		switch_event.event_id.header.misc |=
+				PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
+
 	perf_iterate_sb(perf_event_switch_output,
 		       &switch_event,
 		       NULL);
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 		 * __u16 sample size limit.
 		 */
 		if (attr->sample_stack_user >= USHRT_MAX)
-			ret = -EINVAL;
+			return -EINVAL;
 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
-			ret = -EINVAL;
+			return -EINVAL;
 	}
 
 	if (!attr->sample_max_stack)
......
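A hypothetical attr setup sketch (illustration only, not from this diff): setting context_switch = 1 is what makes the kernel emit the PERF_RECORD_SWITCH records tagged above, and sample_stack_user has to stay u64-aligned and below the __u16 limit, otherwise perf_copy_attr() now returns -EINVAL directly. The helper name and the 8192 value are made up for the example.

    #include <string.h>
    #include <linux/perf_event.h>

    static void init_switch_attr(struct perf_event_attr *attr)
    {
    	memset(attr, 0, sizeof(*attr));
    	attr->size = sizeof(*attr);
    	attr->type = PERF_TYPE_SOFTWARE;
    	attr->config = PERF_COUNT_SW_DUMMY;
    	attr->context_switch = 1;	/* ask for PERF_RECORD_SWITCH records */
    	attr->sample_type = PERF_SAMPLE_STACK_USER;
    	attr->sample_stack_user = 8192;	/* multiple of 8 and < USHRT_MAX */
    }

    int main(void)
    {
    	struct perf_event_attr attr;

    	init_switch_attr(&attr);
    	return attr.context_switch ? 0 : 1;
    }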
@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_CRM_SHIFT		7
 #define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
 #define KVM_REG_ARM_32_CRN_SHIFT	11
+/*
+ * For KVM currently all guest registers are nonsecure, but we reserve a bit
+ * in the encoding to distinguish secure from nonsecure for AArch32 system
+ * registers that are banked by security. This is 1 for the secure banked
+ * register, and 0 for the nonsecure banked register or if the register is
+ * not banked by security.
+ */
+#define KVM_REG_ARM_SECURE_MASK	0x0000000010000000
+#define KVM_REG_ARM_SECURE_SHIFT	28
 
 #define ARM_CP15_REG_SHIFT_MASK(x,n) \
 	(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
......
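Illustration only: how the new SECURE bit composes with the existing ARM_CP15_REG_SHIFT_MASK() helper when building a banked cp15 register id. The macros are copied locally from the hunk above so the snippet is self-contained, and the CRn value 6 is an arbitrary example, not taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_REG_ARM_SECURE_MASK		0x0000000010000000
    #define KVM_REG_ARM_SECURE_SHIFT	28
    #define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
    #define KVM_REG_ARM_32_CRN_SHIFT	11

    #define ARM_CP15_REG_SHIFT_MASK(x,n) \
    	(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)

    int main(void)
    {
    	/* CRn = 6, secure bank selected (bit 28 set). */
    	uint64_t id = ARM_CP15_REG_SHIFT_MASK(6, 32_CRN) |
    		      ARM_CP15_REG_SHIFT_MASK(1, SECURE);

    	printf("register id bits: 0x%llx\n", (unsigned long long)id);
    	return 0;
    }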
@@ -53,12 +53,6 @@
 # define NEED_MOVBE	0
 #endif
 
-#ifdef CONFIG_X86_5LEVEL
-# define NEED_LA57	(1<<(X86_FEATURE_LA57 & 31))
-#else
-# define NEED_LA57	0
-#endif
-
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT
 /* Paravirtualized systems may not have PSE or PGE available */
@@ -104,7 +98,7 @@
 #define REQUIRED_MASK13	0
 #define REQUIRED_MASK14	0
 #define REQUIRED_MASK15	0
-#define REQUIRED_MASK16	(NEED_LA57)
+#define REQUIRED_MASK16	0
 #define REQUIRED_MASK17	0
 #define REQUIRED_MASK18	0
 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
......
@@ -354,8 +354,25 @@ struct kvm_xcrs {
 	__u64 padding[16];
 };
 
-/* definition of registers in kvm_run */
+#define KVM_SYNC_X86_REGS      (1UL << 0)
+#define KVM_SYNC_X86_SREGS     (1UL << 1)
+#define KVM_SYNC_X86_EVENTS    (1UL << 2)
+
+#define KVM_SYNC_X86_VALID_FIELDS \
+	(KVM_SYNC_X86_REGS| \
+	 KVM_SYNC_X86_SREGS| \
+	 KVM_SYNC_X86_EVENTS)
+
+/* kvm_sync_regs struct included by kvm_run struct */
 struct kvm_sync_regs {
+	/* Members of this structure are potentially malicious.
+	 * Care must be taken by code reading, esp. interpreting,
+	 * data fields from them inside KVM to prevent TOCTOU and
+	 * double-fetch types of vulnerabilities.
+	 */
+	struct kvm_regs regs;
+	struct kvm_sregs sregs;
+	struct kvm_vcpu_events events;
 };
 
 #define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
......
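A hedged sketch of the intended usage (not taken from this diff): with the sync-regs capability available, userspace asks KVM to mirror register state into kvm_run->s.regs around KVM_RUN instead of issuing extra KVM_GET_REGS/KVM_SET_REGS ioctls. The helper name is invented, VM/vcpu setup and error handling are omitted, and the KVM_SYNC_X86_* constants assume a libc with the updated header.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fragment: vcpu_fd and run must come from normal KVM VM/vcpu setup. */
    static uint64_t run_and_read_rip(int vcpu_fd, struct kvm_run *run)
    {
    	/* Ask KVM to fill these register classes on return from KVM_RUN. */
    	run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;

    	ioctl(vcpu_fd, KVM_RUN, 0);

    	/* x86 only: registers were synced, no KVM_GET_REGS round trip. */
    	return run->s.regs.regs.rip;
    }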
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _LINUX_CORESIGHT_PMU_H
......
@@ -27,6 +27,9 @@
 # define MAP_UNINITIALIZED 0x0		/* Don't support this flag */
 #endif
 
+/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+#define MAP_FIXED_NOREPLACE	0x100000	/* MAP_FIXED which doesn't unmap underlying mapping */
+
 /*
  * Flags for mlock
  */
......
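A hedged illustration of the new flag's semantics: like MAP_FIXED, but the kernel refuses the request (with EEXIST) instead of silently unmapping whatever already lives at the requested address. The hint address is an arbitrary example, and the fallback define mirrors the value above for libcs whose headers do not carry it yet.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000
    #endif

    int main(void)
    {
    	void *hint = (void *)0x700000000000UL;	/* arbitrary example address */
    	void *p;

    	p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
    		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    	if (p == MAP_FAILED)
    		printf("first map failed: %s\n", strerror(errno));

    	/* A second request at the same address must not clobber the first. */
    	p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
    		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    	if (p == MAP_FAILED)
    		printf("second map refused: %s (EEXIST expected)\n",
    		       strerror(errno));
    	return 0;
    }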
@@ -864,6 +864,7 @@ enum bpf_func_id {
 /* BPF_FUNC_skb_set_tunnel_key flags. */
 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
+#define BPF_F_SEQ_NUMBER		(1ULL << 3)
 
 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
  * BPF_FUNC_perf_event_read_value flags.
......
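A hedged sketch of where the new flag is consumed (not from this diff): BPF_F_SEQ_NUMBER is passed to bpf_skb_set_tunnel_key() to request sequence numbers on encapsulated frames. The hand-rolled helper declaration follows the old samples/bpf style since no helper header is pulled in, the section name and tunnel values are made up, and the fallback define mirrors the value above.

    #include <linux/bpf.h>

    #ifndef BPF_F_SEQ_NUMBER
    #define BPF_F_SEQ_NUMBER (1ULL << 3)
    #endif

    static int (*bpf_skb_set_tunnel_key)(struct __sk_buff *skb,
    				     struct bpf_tunnel_key *key,
    				     int size, unsigned long long flags) =
    	(void *) BPF_FUNC_skb_set_tunnel_key;

    __attribute__((section("gre_set_tunnel"), used))
    int set_gre_tunnel(struct __sk_buff *skb)
    {
    	struct bpf_tunnel_key key = {};

    	key.tunnel_id = 2;	/* example values only */
    	key.tunnel_ttl = 64;

    	/* Ask the tunnel device to add sequence numbers when encapsulating. */
    	return bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
    				      BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
    }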
@@ -941,4 +941,43 @@ enum {
 	IFLA_EVENT_BONDING_OPTIONS,	/* change in bonding options */
 };
 
+/* tun section */
+
+enum {
+	IFLA_TUN_UNSPEC,
+	IFLA_TUN_OWNER,
+	IFLA_TUN_GROUP,
+	IFLA_TUN_TYPE,
+	IFLA_TUN_PI,
+	IFLA_TUN_VNET_HDR,
+	IFLA_TUN_PERSIST,
+	IFLA_TUN_MULTI_QUEUE,
+	IFLA_TUN_NUM_QUEUES,
+	IFLA_TUN_NUM_DISABLED_QUEUES,
+	__IFLA_TUN_MAX,
+};
+
+#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)
+
+/* rmnet section */
+
+#define RMNET_FLAGS_INGRESS_DEAGGREGATION         (1U << 0)
+#define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)
+
+enum {
+	IFLA_RMNET_UNSPEC,
+	IFLA_RMNET_MUX_ID,
+	IFLA_RMNET_FLAGS,
+	__IFLA_RMNET_MAX,
+};
+
+#define IFLA_RMNET_MAX	(__IFLA_RMNET_MAX - 1)
+
+struct ifla_rmnet_flags {
+	__u32	flags;
+	__u32	mask;
+};
+
 #endif /* _UAPI_LINUX_IF_LINK_H */
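Illustration only, not from this diff: the IFLA_RMNET_FLAGS attribute carries a flags/mask pair, which appears intended to let userspace flip individual RMNET_FLAGS_* bits without disturbing the others; netlink message assembly is omitted and the helper name is invented. The struct and constants are repeated locally so the snippet stands alone.

    #include <linux/types.h>

    #define RMNET_FLAGS_INGRESS_DEAGGREGATION	(1U << 0)
    #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4		(1U << 2)

    struct ifla_rmnet_flags {
    	__u32 flags;
    	__u32 mask;
    };

    static struct ifla_rmnet_flags enable_deagg_and_csum(void)
    {
    	struct ifla_rmnet_flags f = {
    		/* bits we want set ... */
    		.flags = RMNET_FLAGS_INGRESS_DEAGGREGATION |
    			 RMNET_FLAGS_INGRESS_MAP_CKSUMV4,
    		/* ... and which bits this request is allowed to touch */
    		.mask  = RMNET_FLAGS_INGRESS_DEAGGREGATION |
    			 RMNET_FLAGS_INGRESS_MAP_CKSUMV4,
    	};
    	return f;
    }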
@@ -396,6 +396,10 @@ struct kvm_run {
 		char padding[256];
 	};
 
+	/* 2048 is the size of the char array used to bound/pad the size
+	 * of the union that holds sync regs.
+	 */
+	#define SYNC_REGS_SIZE_BYTES 2048
 	/*
 	 * shared registers between kvm and userspace.
 	 * kvm_valid_regs specifies the register classes set by the host
@@ -407,7 +411,7 @@ struct kvm_run {
 	__u64 kvm_dirty_regs;
 	union {
 		struct kvm_sync_regs regs;
-		char padding[2048];
+		char padding[SYNC_REGS_SIZE_BYTES];
 	} s;
 };
@@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_PPC_GET_CPU_CHAR 151
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
+#define KVM_CAP_HYPERV_EVENTFD 154
 
 #ifdef KVM_CAP_IRQ_ROUTING
@@ -1375,6 +1380,10 @@ struct kvm_enc_region {
 #define KVM_MEMORY_ENCRYPT_REG_REGION    _IOR(KVMIO, 0xbb, struct kvm_enc_region)
 #define KVM_MEMORY_ENCRYPT_UNREG_REGION  _IOR(KVMIO, 0xbc, struct kvm_enc_region)
 
+/* Available with KVM_CAP_HYPERV_EVENTFD */
+#define KVM_HYPERV_EVENTFD        _IOW(KVMIO,  0xbd, struct kvm_hyperv_eventfd)
+
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
 	/* Guest initialization commands */
@@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry {
 #define KVM_ARM_DEV_EL1_PTIMER		(1 << 1)
 #define KVM_ARM_DEV_PMU			(1 << 2)
 
+struct kvm_hyperv_eventfd {
+	__u32 conn_id;
+	__s32 fd;
+	__u32 flags;
+	__u32 padding[3];
+};
+
+#define KVM_HYPERV_CONN_ID_MASK		0x00ffffff
+#define KVM_HYPERV_EVENTFD_DEASSIGN	(1 << 0)
+
 #endif /* __LINUX_KVM_H */
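A hedged sketch of how the new ioctl is meant to be used (not from this diff): an eventfd is attached to a Hyper-V synthetic connection id via KVM_HYPERV_EVENTFD, guarded by KVM_CAP_HYPERV_EVENTFD. The helper name is invented, VM setup and error handling are omitted, and the new constants assume the updated header shown above.

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fragment: vm_fd must come from KVM_CREATE_VM on /dev/kvm. */
    static int hook_hyperv_conn(int vm_fd, __u32 conn_id)
    {
    	struct kvm_hyperv_eventfd hvevfd = {
    		.conn_id = conn_id & KVM_HYPERV_CONN_ID_MASK,
    		.fd      = eventfd(0, EFD_CLOEXEC),
    		.flags   = 0,	/* KVM_HYPERV_EVENTFD_DEASSIGN would detach it */
    	};

    	if (!ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_EVENTFD))
    		return -1;	/* capability not present on this kernel */

    	return ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
    }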
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
 /*
- * Indicates that the content of PERF_SAMPLE_IP points to
- * the actual instruction that triggered the event. See also
- * perf_event_attr::precise_ip.
+ * These PERF_RECORD_MISC_* flags below are safely reused
+ * for the following events:
+ *
+ *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
+ *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ *
+ *
+ * PERF_RECORD_MISC_EXACT_IP:
+ *   Indicates that the content of PERF_SAMPLE_IP points to
+ *   the actual instruction that triggered the event. See also
+ *   perf_event_attr::precise_ip.
+ *
+ * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
+ *   Indicates that thread was preempted in TASK_RUNNING state.
  */
 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT	(1 << 14)
 /*
  * Reserve the last bit to indicate some extended misc field
  */
......
@@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t;
 #define	SNDRV_PCM_FORMAT_DSD_U16_BE	((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
 #define	SNDRV_PCM_FORMAT_DSD_U32_BE	((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
 #define	SNDRV_PCM_FORMAT_LAST		SNDRV_PCM_FORMAT_DSD_U32_BE
+#define	SNDRV_PCM_FORMAT_FIRST		SNDRV_PCM_FORMAT_S8
 
 #ifdef SNDRV_LITTLE_ENDIAN
 #define	SNDRV_PCM_FORMAT_S16		SNDRV_PCM_FORMAT_S16_LE
......
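An illustration of the likely intent of the new define (an assumption, not stated in this diff): together with SNDRV_PCM_FORMAT_LAST it lets tools walk the whole PCM format id range without hard-coding the first enumerator. Some ids inside the range are unused, so real code would still validate each one.

    #include <stdio.h>
    #include <sound/asound.h>

    int main(void)
    {
    	int fmt;

    	for (fmt = (int)SNDRV_PCM_FORMAT_FIRST;
    	     fmt <= (int)SNDRV_PCM_FORMAT_LAST; fmt++)
    		printf("pcm format id %d\n", fmt);
    	return 0;
    }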
@@ -433,7 +433,7 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
 		if (ambiguous_option) {
 			 fprintf(stderr,
-				 " Error: Ambiguous option: %s (could be --%s%s or --%s%s)",
+				 " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n",
 				 arg,
 				 (ambiguous_flags & OPT_UNSET) ?  "no-" : "",
 				 ambiguous_option->long_name,
@@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options)
 		return;
 
 	if (strstarts(arg, "no-")) {
-		fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg);
+		fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
 		exit(129);
 	}
 
@@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options)
 		if (!options->long_name)
 			continue;
 		if (strstarts(options->long_name, arg)) {
-			fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg);
+			fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg);
 			exit(129);
 		}
 	}
......
@@ -67,6 +67,9 @@ OPTIONS
 --phys-data::
 	Record/Report sample physical addresses
 
+In addition, for report all perf report options are valid, and for record
+all perf record options.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
@@ -228,14 +228,15 @@ OPTIONS
 	For sample events it's possible to display misc field with -F +misc option,
 	following letters are displayed for each bit:
 
 		PERF_RECORD_MISC_KERNEL               K
 		PERF_RECORD_MISC_USER                 U
 		PERF_RECORD_MISC_HYPERVISOR           H
 		PERF_RECORD_MISC_GUEST_KERNEL         G
 		PERF_RECORD_MISC_GUEST_USER           g
 		PERF_RECORD_MISC_MMAP_DATA*           M
 		PERF_RECORD_MISC_COMM_EXEC            E
 		PERF_RECORD_MISC_SWITCH_OUT           S
+		PERF_RECORD_MISC_SWITCH_OUT_PREEMPT   Sp
 
 	  $ perf script -F +misc ...
 	   sched-messaging  1414 K     28690.636582:       4590 cycles ...
......
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <stdbool.h>
......
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <api/fs/fs.h>
......
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef INCLUDE__PERF_CS_ETM_H__
......
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <string.h>
......
@@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 $(header): $(sys)/syscall_64.tbl $(systbl)
 	@(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
 	(diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
-	|| echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true
+	|| echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true
 	$(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
 
 clean::
......
@@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 	};
 
 	argc = parse_options(argc, argv, options, record_mem_usage,
-			     PARSE_OPT_STOP_AT_NON_OPTION);
+			     PARSE_OPT_KEEP_UNKNOWN);
 
 	rec_argc = argc + 9; /* max number of arguments */
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv)
 	}
 
 	argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
-					mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
+					mem_usage, PARSE_OPT_KEEP_UNKNOWN);
 
 	if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
 		usage_with_options(mem_usage, mem_options);
......
@@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
 		break;
 	case PERF_RECORD_SWITCH:
 	case PERF_RECORD_SWITCH_CPU_WIDE:
-		if (has(SWITCH_OUT))
+		if (has(SWITCH_OUT)) {
 			ret += fprintf(fp, "S");
+			if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
+				ret += fprintf(fp, "p");
+		}
 	default:
 		break;
 	}
......
@@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = {
 	.max_entries = 1,
 };
 
-SEC("func=SyS_epoll_pwait")
+SEC("func=do_epoll_wait")
 int bpf_func__SyS_epoll_pwait(void *ctx)
 {
 	int ind =0;
......
@@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
 		evsels[i] = perf_evsel__newtp("syscalls", name);
 		if (IS_ERR(evsels[i])) {
-			pr_debug("perf_evsel__new\n");
+			pr_debug("perf_evsel__new(%s)\n", name);
 			goto out_delete_evlist;
 		}
......
@@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
 	P_MMAP_FLAG(EXECUTABLE);
 	P_MMAP_FLAG(FILE);
 	P_MMAP_FLAG(FIXED);
+#ifdef MAP_FIXED_NOREPLACE
+	P_MMAP_FLAG(FIXED_NOREPLACE);
+#endif
 	P_MMAP_FLAG(GROWSDOWN);
 	P_MMAP_FLAG(HUGETLB);
 	P_MMAP_FLAG(LOCKED);
......
@@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 	"h/?/F1        Show this window\n"				\
 	"UP/DOWN/PGUP\n"						\
 	"PGDN/SPACE    Navigate\n"					\
-	"q/ESC/CTRL+C  Exit browser\n\n"				\
+	"q/ESC/CTRL+C  Exit browser or go back to previous screen\n\n"	\
 	"For multiple event sessions:\n\n"				\
 	"TAB/UNTAB     Switch events\n\n"				\
 	"For symbolic views (--sort has sym):\n\n"			\
......
+// SPDX-License-Identifier: GPL-2.0
 /*
- * SPDX-License-Identifier: GPL-2.0
- *
  * Copyright(C) 2015-2018 Linaro Limited.
  *
  * Author: Tor Jeremiassen <tor@ti.com>
......
+// SPDX-License-Identifier: GPL-2.0
 /*
- * SPDX-License-Identifier: GPL-2.0
- *
  * Copyright(C) 2015-2018 Linaro Limited.
  *
  * Author: Tor Jeremiassen <tor@ti.com>
......
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright(C) 2015 Linaro Limited. All rights reserved.
  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
......
@@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
 {
 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
-	const char *in_out = out ? "OUT" : "IN ";
+	const char *in_out = !out ? "IN " :
+		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
+				"OUT " : "OUT preempt";
 
 	if (event->header.type == PERF_RECORD_SWITCH)
 		return fprintf(fp, " %s\n", in_out);
......
@@ -2870,8 +2870,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 #if defined(__i386__) || defined(__x86_64__)
 		if (evsel->attr.type == PERF_TYPE_HARDWARE)
 			return scnprintf(msg, size, "%s",
-	"No hardware sampling interrupt available.\n"
-	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
+	"No hardware sampling interrupt available.\n");
 #endif
 		break;
 	case EBUSY:
@@ -2894,8 +2893,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 	return scnprintf(msg, size,
 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
-	"/bin/dmesg may provide additional information.\n"
-	"No CONFIG_PERF_EVENTS=y kernel support configured?",
+	"/bin/dmesg | grep -i perf may provide additional information.\n",
 	err, str_error_r(err, sbuf, sizeof(sbuf)),
 	perf_evsel__name(evsel));
 }
......
@@ -562,6 +562,12 @@ static int is_pmu_core(const char *name)
 	if (stat(path, &st) == 0)
 		return 1;
 
+	/* Look for cpu sysfs (specific to s390) */
+	scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s",
+		  sysfs, name);
+	if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5))
+		return 1;
+
 	return 0;
 }
......