1. 03 April 2018, 2 commits
  2. 02 April 2018, 3 commits
    • perf trace: Show only failing syscalls · 0a6545bd
      Authored by Arnaldo Carvalho de Melo
      For instance:
      
        # perf probe "vfs_getname=getname_flags:72 pathname=result->name:string"
        Added new event:
          probe:vfs_getname    (on getname_flags:72 with pathname=result->name:string)
      
        You can now use it in all perf tools, such as:
      
      	  perf record -e probe:vfs_getname -aR sleep 1
      
        # perf trace --failure sleep 1
           0.043 ( 0.010 ms): sleep/10978 access(filename: /etc/ld.so.preload, mode: R) = -1 ENOENT No such file or directory
      
      For reference, here are all the syscalls in this case:
      
        # perf trace sleep 1
               ? (         ): sleep/10976  ... [continued]: execve()) = 0
             0.027 ( 0.001 ms): sleep/10976 brk() = 0x55bdc2d04000
             0.044 ( 0.010 ms): sleep/10976 access(filename: /etc/ld.so.preload, mode: R) = -1 ENOENT No such file or directory
             0.057 ( 0.006 ms): sleep/10976 openat(dfd: CWD, filename: /etc/ld.so.cache, flags: CLOEXEC) = 3
             0.064 ( 0.002 ms): sleep/10976 fstat(fd: 3, statbuf: 0x7fffac22b370) = 0
             0.067 ( 0.003 ms): sleep/10976 mmap(len: 111457, prot: READ, flags: PRIVATE, fd: 3) = 0x7feec8615000
             0.071 ( 0.001 ms): sleep/10976 close(fd: 3) = 0
             0.080 ( 0.007 ms): sleep/10976 openat(dfd: CWD, filename: /lib64/libc.so.6, flags: CLOEXEC) = 3
             0.088 ( 0.002 ms): sleep/10976 read(fd: 3, buf: 0x7fffac22b538, count: 832) = 832
             0.092 ( 0.001 ms): sleep/10976 fstat(fd: 3, statbuf: 0x7fffac22b3d0) = 0
             0.094 ( 0.002 ms): sleep/10976 mmap(len: 8192, prot: READ|WRITE, flags: PRIVATE|ANONYMOUS) = 0x7feec8613000
             0.099 ( 0.004 ms): sleep/10976 mmap(len: 3889792, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3) = 0x7feec8057000
             0.104 ( 0.007 ms): sleep/10976 mprotect(start: 0x7feec8203000, len: 2097152) = 0
             0.112 ( 0.005 ms): sleep/10976 mmap(addr: 0x7feec8403000, len: 24576, prot: READ|WRITE, flags: PRIVATE|DENYWRITE|FIXED, fd: 3, off: 1753088) = 0x7feec8403000
             0.120 ( 0.003 ms): sleep/10976 mmap(addr: 0x7feec8409000, len: 14976, prot: READ|WRITE, flags: PRIVATE|ANONYMOUS|FIXED) = 0x7feec8409000
             0.128 ( 0.001 ms): sleep/10976 close(fd: 3) = 0
             0.139 ( 0.001 ms): sleep/10976 arch_prctl(option: 4098, arg2: 140663540761856) = 0
             0.186 ( 0.004 ms): sleep/10976 mprotect(start: 0x7feec8403000, len: 16384, prot: READ) = 0
             0.204 ( 0.003 ms): sleep/10976 mprotect(start: 0x55bdc0ec3000, len: 4096, prot: READ) = 0
             0.209 ( 0.004 ms): sleep/10976 mprotect(start: 0x7feec8631000, len: 4096, prot: READ) = 0
             0.214 ( 0.010 ms): sleep/10976 munmap(addr: 0x7feec8615000, len: 111457) = 0
             0.269 ( 0.001 ms): sleep/10976 brk() = 0x55bdc2d04000
             0.271 ( 0.002 ms): sleep/10976 brk(brk: 0x55bdc2d25000) = 0x55bdc2d25000
             0.274 ( 0.001 ms): sleep/10976 brk() = 0x55bdc2d25000
             0.278 ( 0.007 ms): sleep/10976 open(filename: /usr/lib/locale/locale-archive, flags: CLOEXEC) = 3
             0.288 ( 0.001 ms): sleep/10976 fstat(fd: 3</usr/lib/locale/locale-archive>, statbuf: 0x7feec8408aa0) = 0
             0.290 ( 0.003 ms): sleep/10976 mmap(len: 113045344, prot: READ, flags: PRIVATE, fd: 3) = 0x7feec1488000
             0.297 ( 0.001 ms): sleep/10976 close(fd: 3</usr/lib/locale/locale-archive>) = 0
             0.325 (1000.193 ms): sleep/10976 nanosleep(rqtp: 0x7fffac22c0b0) = 0
          1000.560 ( 0.006 ms): sleep/10976 close(fd: 1) = 0
          1000.573 ( 0.005 ms): sleep/10976 close(fd: 2) = 0
          1000.596 (         ): sleep/10976 exit_group()
        #
      
      And this can be done system wide, etc, with backtraces:
      
        # perf trace --max-stack=16 --failure sleep 1
           0.048 ( 0.015 ms): sleep/11092 access(filename: /etc/ld.so.preload, mode: R) = -1 ENOENT No such file or directory
                                             __access (inlined)
                                             dl_main (/usr/lib64/ld-2.26.so)
        #
      
      Or for some specific syscalls:
      
        # perf trace --max-stack=16 -e openat --failure cat /tmp/rien
        cat: /tmp/rien: No such file or directory
             0.251 ( 0.012 ms): cat/11106 openat(dfd: CWD, filename: /tmp/rien) = -1 ENOENT No such file or directory
                                               __libc_open64 (inlined)
                                               main (/usr/bin/cat)
                                               __libc_start_main (/usr/lib64/libc-2.26.so)
                                               _start (/usr/bin/cat)
        #
      
      Look for inotify* syscalls that fail, system wide, for 2 seconds, with backtraces:
      
        # perf trace -a --max-stack=16 --failure -e inotify* sleep 2
         819.165 ( 0.058 ms): gmain/1724 inotify_add_watch(fd: 8<anon_inode:inotify>, pathname: /home/acme/~, mask: 16789454) = -1 ENOENT No such file or directory
                                             __GI_inotify_add_watch (inlined)
                                             _ik_watch (/usr/lib64/libgio-2.0.so.0.5400.3)
                                             _ip_start_watching (/usr/lib64/libgio-2.0.so.0.5400.3)
                                             im_scan_missing (/usr/lib64/libgio-2.0.so.0.5400.3)
                                             g_timeout_dispatch (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             g_main_context_dispatch (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             g_main_context_iterate.isra.23 (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             g_main_context_iteration (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             glib_worker_main (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             g_thread_proxy (/usr/lib64/libglib-2.0.so.0.5400.3)
                                             start_thread (/usr/lib64/libpthread-2.26.so)
                                             __GI___clone (inlined)
        #
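
      Conceptually, --failure just suppresses every syscall whose raw return
      value is not an error. A minimal sketch of such a predicate, assuming the
      usual Linux convention that failing syscalls return a negative errno in
      the range [-4095, -1] (the helper name is hypothetical; perf's actual
      logic lives in tools/perf/builtin-trace.c):

        #include <stdbool.h>

        /* Hypothetical helper: decide whether a raw syscall return value is an error. */
        static bool syscall_failed(long raw_ret)
        {
                /* failing syscalls return -errno, conventionally in [-4095, -1] */
                return raw_ret < 0 && raw_ret >= -4095;
        }

      With --failure set, the printing loop can then simply skip any event for
      which such a predicate is false.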
      
      Cc: Adrian Hunter <adrian.hunter@intel.com>
      Cc: David Ahern <dsahern@gmail.com>
      Cc: Jiri Olsa <jolsa@kernel.org>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Wang Nan <wangnan0@huawei.com>
      Link: https://lkml.kernel.org/n/tip-8f7d3mngaxvi7tlzloz3n7cs@git.kernel.org
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    • tools headers: Synchronize x86's cpufeatures.h · 5e2a146b
      Authored by Arnaldo Carvalho de Melo
      Due to these commits:
      
        1da961d7 ("x86/cpufeatures: Add Intel Total Memory Encryption cpufeature")
        7958b224 ("x86/cpufeatures: Add Intel PCONFIG cpufeature")
      
      To silence this perf build warning:
      
        Warning: Kernel ABI header at 'tools/arch/x86/include/asm/cpufeatures.h' differs from latest version at 'arch/x86/include/asm/cpufeatures.h'
      
      Nothing in those csets requires changes in tools/perf/, so just
      sync the header to silence the build warning.
      
      Cc: Adrian Hunter <adrian.hunter@intel.com>
      Cc: David Ahern <dsahern@gmail.com>
      Cc: Jiri Olsa <jolsa@kernel.org>
      Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Wang Nan <wangnan0@huawei.com>
      Link: https://lkml.kernel.org/n/tip-m2yl8wj0uxs8pncq2ncfcx46@git.kernel.org
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    • perf tools: Add a "dso_size" sort order · b74d12d5
      Authored by Kim Phillips
      Add the DSO size to the 'perf report'/'perf top' sort output list.
      
      This includes adding a map__size() function to map.h; its value
      (end - start) is approximately equal to the DSO data file_size,
      as the table below shows (a sketch of the helper follows it):
      
        DSO				file size	map (end-start)	file / (end-start)
        libwebkit2gtk-4.0.so.37.24.9	43260072	41295872	95%
        libglib-2.0.so.0.5400.1		 1125680	 1118208	99%
        libc-2.26.so			 1960656 	 1925120	101%
        libdbus-1.so.3.14.13		  309456 	  303104	102%
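
      A minimal sketch of what such a helper can look like (the struct below is
      a simplified stand-in for perf's 'struct map' in tools/perf/util/map.h,
      for illustration only):

        #include <stdint.h>

        struct map {            /* simplified stand-in for perf's struct map */
                uint64_t start;
                uint64_t end;
        };

        /* Size of the mapped region; approximates the DSO's on-disk file size. */
        static inline uint64_t map__size(const struct map *map)
        {
                return map->end - map->start;
        }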
      
      Sample output:
      
        $ ./perf report -s dso_size,dso
        Samples: 2K of event 'cycles:uppp', Event count (approx.): 128373340
        Overhead  DSO size  Shared Object
          90.62%   unknown  [unknown]
           2.87%   1118208  libglib-2.0.so.0.5400.1
           1.92%    303104  libdbus-1.so.3.14.13
           1.42%   1925120  libc-2.26.so
           0.77%  41295872  libwebkit2gtk-4.0.so.37.24.9
           0.61%    335872  libgobject-2.0.so.0.5400.1
           0.41%   1052672  libgdk-3.so.0.2200.25
           0.36%    106496  libpthread-2.26.so
           0.29%    221184  dbus-daemon
           0.17%    159744  ld-2.26.so
           0.13%     49152  libwayland-client.so.0.3.0
           0.12%   1642496  libgio-2.0.so.0.5400.1
           0.09%   7327744  libgtk-3.so.0.2200.25
           0.09%  12324864  libmozjs-52.so.0.0.0
           0.05%   4796416  perf
           0.04%    843776  libgjs.so.0.0.0
           0.03%   1409024  libmutter-clutter-1.so
      
      Committer testing:
      
      To sort by DSO size, use:
      
        # perf report -F dso_size,dso,overhead -s dso_size
        <SNIP>
           3465216  libdns-export.so.174.0.1   0.00%
           3522560  libgc.so.1.0.3             0.00%
           3538944  libbfd-2.29-13.fc27.so     0.59%
           3670016  libunistring.so.2.1.0      0.00%
           3723264  libguile-2.0.so.22.8.1     0.00%
           3776512  libgio-2.0.so.0.5400.3     0.00%
           3891200  libc-2.26.so               0.96%
           3944448  libmozjs-17.0.so           0.00%
           4218880  libperl.so.5.26.1          0.18%
           4452352  libpython2.7.so.1.0        0.02%
           4472832  perf                       0.02%
           4603904  git                        0.01%
           4751360  libcrypto.so.1.1.0g        0.00%
           5005312  libslang.so.2.3.1          0.00%
           7315456  libgtk-3.so.0.2200.26      0.09%
           8818688  i965_dri.so                2.46%
           8818688  i965_dri.so (deleted)      1.26%
          12414976  libmozjs-52.so.0.0.0       0.03%
          23642112  cc1                        2.02%
          27889664  [kernel.kallsyms]         25.41%
          80834560  libxul.so (deleted)       15.68%
          98078720  chrome                    32.03%
        1056964608  [kernel.kallsyms]          1.59%
        #
      Signed-off-by: Kim Phillips <kim.phillips@arm.com>
      Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
      Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
      Cc: Andi Kleen <ak@linux.intel.com>
      Cc: Jin Yao <yao.jin@linux.intel.com>
      Cc: Jiri Olsa <jolsa@redhat.com>
      Cc: Maxim Kuvyrkov <maxim.kuvyrkov@linaro.org>
      Cc: Milian Wolff <milian.wolff@kdab.com>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Link: http://lkml.kernel.org/r/20180327060956.1c01ebe67a2a941bb4468c6f@arm.com
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
  3. 31 March 2018, 2 commits
  4. 29 March 2018, 3 commits
  5. 28 March 2018, 9 commits
  6. 27 March 2018, 2 commits
    • perf/x86/intel: Fix linear IP of PEBS real_ip on Haswell and later CPUs · 71eb9ee9
      Authored by Stephane Eranian
      This patch fixes a bug in how pebs->real_ip is handled in the PEBS
      handler. real_ip only exists on Haswell and later processors. It is
      actually the eventing IP, i.e., where the event occurred, as opposed
      to pebs->ip, which is the PEBS interrupt IP and is always off
      by one.
      
      The problem is that the real_ip, just like the IP, needs to be fixed up
      because PEBS does not record all the machine state registers, and
      in particular the code segment (cs). This is why we have the set_linear_ip()
      function. The problem was that set_linear_ip() was only used on the pebs->ip
      and not the pebs->real_ip.
      
      We have profiles which ran into invalid callstacks because of this.
      Here is an example:
      
       .....  0: ffffffffffffff80 recent entry, marker kernel v
       .....  1: 000000000040044d <= user address in kernel space!
       .....  2: fffffffffffffe00 marker enter user v
       .....  3: 000000000040044d
       .....  4: 00000000004004b6 oldest entry
      
      Debugging output in get_perf_callchain():
      
       [  857.769909] CALLCHAIN: CPU8 ip=40044d regs->cs=10 user_mode(regs)=0
      
      The problem is that the kernel entry in 1: points to a user level
      address. How can that be?
      
      The reason is that with PEBS sampling the instruction that caused the event
      to occur and the instruction where the CPU was when the interrupt was posted
      may be far apart. And sometime during that time window, the privilege level may
      change. This happens, for instance, when the PEBS sample is taken close to a
      kernel entry point. Here the PEBS eventing IP (real_ip) captured a user level
      instruction. But by the time the PMU interrupt fired, the processor had already
      entered kernel space. This is why the debug output shows a user address with
      user_mode() false.
      
      The problem comes from PEBS not recording the code segment (cs) register.
      The register is used in x86_64 to determine if executing in kernel vs user
      space. This is okay because the kernel has a software workaround called
      set_linear_ip(). But the issue in setup_pebs_sample_data() is that
      set_linear_ip() is never called on the real_ip value when it is available
      (Haswell and later) and precise_ip > 1.
      
      This patch fixes this problem and eliminates the callchain discrepancy.
      
      The patch restructures the code around set_linear_ip() to minimize the number
      of times the IP has to be set.
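
      A self-contained toy version of that logic (the types and the
      'have_real_ip' flag are illustrative stand-ins; the real code lives in
      arch/x86/events/intel/ds.c and also folds the cs base into the fix-up):

        #include <stdint.h>

        struct pebs_record { uint64_t ip, real_ip; };  /* toy stand-in */
        struct regs        { uint64_t ip; };           /* toy stand-in */

        /* Every IP taken from a PEBS record must pass through this fix-up,
         * because PEBS does not record the code segment (cs). */
        static void set_linear_ip(struct regs *regs, uint64_t ip)
        {
                regs->ip = ip;  /* the real kernel helper also applies the cs base */
        }

        static void choose_ip(struct regs *regs, const struct pebs_record *pebs,
                              int have_real_ip, int precise_ip)
        {
                if (have_real_ip && precise_ip > 1)
                        set_linear_ip(regs, pebs->real_ip);  /* eventing IP */
                else
                        set_linear_ip(regs, pebs->ip);       /* off-by-one interrupt IP */
        }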
      Signed-off-by: Stephane Eranian <eranian@google.com>
      Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
      Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
      Cc: Jiri Olsa <jolsa@redhat.com>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Thomas Gleixner <tglx@linutronix.de>
      Cc: Vince Weaver <vincent.weaver@maine.edu>
      Cc: kan.liang@intel.com
      Link: http://lkml.kernel.org/r/1521788507-10231-1-git-send-email-eranian@google.com
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
    • perf/x86: Update rdpmc_always_available static key to the modern API · 631fe154
      Authored by Davidlohr Bueso
      No changes in refcount semantics -- use DEFINE_STATIC_KEY_FALSE()
      for initialization and replace:
      
        static_key_slow_inc|dec()   =>   static_branch_inc|dec()
        static_key_false()          =>   static_branch_unlikely()
      
      Added a '_key' suffix to rdpmc_always_available, for better self-documentation.
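
      In pattern form, the conversion looks roughly like this (only the API
      shape is shown; the surrounding rdpmc code is elided):

        #include <linux/jump_label.h>

        /* old API */
        static struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

        static void old_inc(void)     { static_key_slow_inc(&rdpmc_always_available); }
        static void old_dec(void)     { static_key_slow_dec(&rdpmc_always_available); }
        static bool old_enabled(void) { return static_key_false(&rdpmc_always_available); }

        /* new API, with the '_key' suffix for self-documentation */
        static DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);

        static void new_inc(void)     { static_branch_inc(&rdpmc_always_available_key); }
        static void new_dec(void)     { static_branch_dec(&rdpmc_always_available_key); }
        static bool new_enabled(void) { return static_branch_unlikely(&rdpmc_always_available_key); }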
      Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
      Cc: Davidlohr Bueso <dave@stgolabs.net>
      Cc: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Thomas Gleixner <tglx@linutronix.de>
      Cc: akpm@linux-foundation.org
      Link: http://lkml.kernel.org/r/20180326210929.5244-5-dave@stgolabs.net
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
  7. 26 March 2018, 8 commits
  8. 25 March 2018, 4 commits
    • Merge tag 'perf-core-for-mingo-4.17-20180323' of... · a0ac7b3c
      Authored by Ingo Molnar
      Merge tag 'perf-core-for-mingo-4.17-20180323' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
      
      Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
      
      - Move non-TUI specific annotation routines out of the TUI browser so
        that they can be used in other UIs, and, to demonstrate that, introduce
        a 'perf annotate --stdio2' option that will apply those formatting
        routines to provide a non-interactive annotation mode (Arnaldo Carvalho de Melo)
      
      - Add a 'P' hotkey to the annotation TUI to dump the current annotated
        symbol to a file, easing reporting via e-mail by getting rid of the
        spaces + right hand side scrollbar chars (Arnaldo Carvalho de Melo)
      
      - Support --ignore-vmlinux in 'perf report' and 'perf annotate', as
        was already present in 'perf top', to use /proc/{kcore,kallsyms},
        allowing one to see what is in fact running (patched stuff, alternatives,
        ftrace, etc), not the initial state of the kernel (vmlinux) (Arnaldo Carvalho de Melo)
      
      - Support 'jump' instructions to a different function, treating them
        as 'call' instructions (Arnaldo Carvalho de Melo)
      
      - Fix some jump artifacts when using vmlinux + ASM functions, where
        the ELF symtab, for instance for entry_SYSCALL_64, includes that and
        what comes after the 'syscall_return_via_sysret' label, but the
        objdump -dS prints the jump targets + offsets using the
        syscall_return_via_sysret address, which was confusing 'perf annotate'.
        See the cset comments for further info (Arnaldo Carvalho de Melo)
      
      - Report error from dwfl_attach_state() in the unwind code (Martin Vuille)
      
      - Reference Py_None before returning it in the python extension (Petr Machata)
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
    • Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace · e43d40b3
      Authored by Linus Torvalds
      Pull mqueuefs revert from Eric Biederman:
       "This fixes a regression that came in the merge window for v4.16.
      
        The problem is that the permissions for mounting and using the
        mqueuefs filesystem are broken. The necessary permission check is
        missing, letting people who should not be able to mount mqueuefs mount
        it. The field sb->s_user_ns is set incorrectly, not allowing the
        mounter of mqueuefs to remount and otherwise have proper control over
        the filesystem.
      
        Al Viro and I see the path to the necessary fixes differently and I am
        not even certain at this point he actually sees all of the necessary
        fixes. Given a couple weeks we can probably work something out but I
        don't see the review being resolved in time for the final v4.16. I
        don't want v4.16 shipping with a nasty regression. So unfortunately I
        am sending a revert"
      
      * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
        Revert "mqueue: switch to on-demand creation of internal mount"
    • Revert "mqueue: switch to on-demand creation of internal mount" · cfb2f6f6
      Authored by Eric W. Biederman
      This reverts commit 36735a6a.
      
      Aleksa Sarai <asarai@suse.de> writes:
      > [REGRESSION v4.16-rc6] [PATCH] mqueue: forbid unprivileged user access to internal mount
      >
      > Felix reported weird behaviour on 4.16.0-rc6 with regards to mqueue[1],
      > which was introduced by 36735a6a ("mqueue: switch to on-demand
      > creation of internal mount").
      >
      > Basically, the reproducer boils down to being able to mount mqueue if
      > you create a new user namespace, even if you don't unshare the IPC
      > namespace.
      >
      > Previously this was not possible, and you would get an -EPERM. The mount
      > is the *host* mqueue mount, which is being cached and just returned from
      > mqueue_mount(). To be honest, I'm not sure if this is safe or not (or if
      > it was intentional -- since I'm not familiar with mqueue).
      >
      > To me it looks like there is a missing permission check. I've included a
      > patch below that I've compile-tested, and should block the above case.
      > Can someone please tell me if I'm missing something? Is this actually
      > safe?
      >
      > [1]: https://github.com/docker/docker/issues/36674
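
      A rough, purely illustrative sketch of the kind of reproducer described
      above (the mount point is hypothetical; the key detail is that no new IPC
      namespace is created):

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>
        #include <sys/mount.h>

        int main(void)
        {
                /* new user + mount namespace, but NOT a new IPC namespace */
                if (unshare(CLONE_NEWUSER | CLONE_NEWNS)) {
                        perror("unshare");
                        return 1;
                }
                /* /tmp/mq is an arbitrary pre-existing directory; before the
                 * regression this mount failed with EPERM, with 36735a6a it
                 * succeeded, handing out the host's internal mqueue mount */
                if (mount("mqueue", "/tmp/mq", "mqueue", 0, NULL)) {
                        perror("mount");
                        return 1;
                }
                puts("mqueue mounted without owning the IPC namespace");
                return 0;
        }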
      
      The issue is a lot deeper than a missing permission check.  sb->s_user_ns
      was improperly set as well.  So in addition to the filesystem being
      mounted when it should not be mounted, things that should be allowed are
      not.
      
      We are practically at the release of 4.16 and there is no agreement between
      Al Viro and myself on what the code should look like to fix things properly.
      So revert the code to what it was before so that we can take our time
      and discuss this properly.
      
      Fixes: 36735a6a ("mqueue: switch to on-demand creation of internal mount")
      Reported-by: Felix Abecassis <fabecassis@nvidia.com>
      Reported-by: Aleksa Sarai <asarai@suse.de>
      Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
    • Merge tag 'pinctrl-v4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl · bcfc1f45
      Authored by Linus Torvalds
      Pull pin control fixes from Linus Walleij:
       "Two fixes for pin control for v4.16:
      
         - Renesas SH-PFC: remove a duplicate clkout pin which was causing
           crashes
      
         - fix Samsung out of bounds exceptions"
      
      * tag 'pinctrl-v4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl:
        pinctrl: samsung: Validate alias coming from DT
        pinctrl: sh-pfc: r8a7795: remove duplicate of CLKOUT pin in pinmux_pins[]
  9. 24 March 2018, 7 commits
    • Merge branch 'perf/urgent' into perf/core, to pick up fixes · 7054e4e0
      Authored by Ingo Molnar
      With the cherry-picked perf/urgent commit merged separately we can now
      merge all the fixes without conflicts.
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
    • Merge branch 'perf/urgent' into perf/core, to resolve conflicts · 5701dd1e
      Authored by Ingo Molnar
      Pick up a cherry-picked commit.
      Signed-off-by: Ingo Molnar <mingo@kernel.org>
    • Merge tag 'trace-v4.16-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace · 99fec39e
      Authored by Linus Torvalds
      Pull kprobe fixes from Steven Rostedt:
       "The documentation for kprobe events says that symbol offets can take
        both a + and - sign to get to befor and after the symbol address.
      
        But in actuality, the code does not support the minus. This fixes that
        issue, and adds a few more selftests to kprobe events"
      
      * tag 'trace-v4.16-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
        selftests: ftrace: Add a testcase for probepoint
        selftests: ftrace: Add a testcase for string type with kprobe_event
        selftests: ftrace: Add probe event argument syntax testcase
        tracing: probeevent: Fix to support minus offset from symbol
    • x86/entry/64: Don't use IST entry for #BP stack · d8ba61ba
      Authored by Andy Lutomirski
      There's nothing IST-worthy about #BP/int3.  We don't allow kprobes
      in the small handful of places in the kernel that run at CPL0 with
      an invalid stack, and 32-bit kernels have used normal interrupt
      gates for #BP forever.
      
      Furthermore, we don't allow kprobes in places that have usergs while
      in kernel mode, so "paranoid" is also unnecessary.
      Signed-off-by: Andy Lutomirski <luto@kernel.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
      Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
      Cc: stable@vger.kernel.org
    • perf annotate: Use absolute addresses to calculate jump target offsets · 980b68ec
      Authored by Arnaldo Carvalho de Melo
      These types of jumps were confusing the annotate browser:
      
      entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
      
      entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
        Percent│ffffffff81a00020:   swapgs
        <SNIP>
               │ffffffff81a00128: ↓ jae    ffffffff81a00139 <syscall_return_via_sysret+0x53>
        <SNIP>
               │ffffffff81a00155: → jmpq   *0x825d2d(%rip)   # ffffffff82225e88 <pv_cpu_ops+0xe8>
      
      I.e. the syscall_return_via_sysret function is actually "inside" the
      entry_SYSCALL_64 function, and the offsets in jumps like these (+0x53)
      are relative to syscall_return_via_sysret, not to entry_SYSCALL_64.
      
      Or this may be some artifact in how the assembler marks the start and
      end of a function and how this ends up in the ELF symtab for vmlinux,
      i.e. syscall_return_via_sysret() isn't "inside" entry_SYSCALL_64, but
      just right after it.
      
      From readelf -sw vmlinux:
      
       80267: ffffffff81a00020   315 NOTYPE  GLOBAL DEFAULT    1 entry_SYSCALL_64
         316: ffffffff81a000e6     0 NOTYPE  LOCAL  DEFAULT    1 syscall_return_via_sysret
      
       0xffffffff81a00020 + 315 > 0xffffffff81a000e6
      
      So instead of looking for offsets after that last '+' sign, calculate
      offsets for jump target addresses that are inside the function being
      disassembled from the absolute address, 0xffffffff81a00139 in this case,
      by subtracting from it the objdump address for the start of the function
      being disassembled, entry_SYSCALL_64() in this case.
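
      In other words, the offset shown for such a jump becomes a simple
      subtraction (a sketch of the arithmetic, not perf's literal code):

        #include <stdio.h>

        int main(void)
        {
                unsigned long long target = 0xffffffff81a00139ULL; /* absolute jump target */
                unsigned long long start  = 0xffffffff81a00020ULL; /* start of entry_SYSCALL_64 */

                printf("%#llx\n", target - start); /* prints 0x119 */
                return 0;
        }

      That 0x119 is exactly the '119' jump target seen in the 'After' listing
      below.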
      
      So, before this patch:
      
      entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
      Percent│       pop    %r10
             │       pop    %r9
             │       pop    %r8
             │       pop    %rax
             │       pop    %rsi
             │       pop    %rdx
             │       pop    %rsi
             │       mov    %rsp,%rdi
             │       mov    %gs:0x5004,%rsp
             │       pushq  0x28(%rdi)
             │       pushq  (%rdi)
             │       push   %rax
             │     ↑ jmp    6c
             │       mov    %cr3,%rdi
             │     ↑ jmp    62
             │       mov    %rdi,%rax
             │       and    $0x7ff,%rdi
             │       bt     %rdi,%gs:0x2219a
             │     ↑ jae    53
             │       btr    %rdi,%gs:0x2219a
             │       mov    %rax,%rdi
             │     ↑ jmp    5b
      
      After:
      
      entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
        0.65 │     → jne    swapgs_restore_regs_and_return_to_usermode
             │       pop    %r10
             │       pop    %r9
             │       pop    %r8
             │       pop    %rax
             │       pop    %rsi
             │       pop    %rdx
             │       pop    %rsi
             │       mov    %rsp,%rdi
             │       mov    %gs:0x5004,%rsp
             │       pushq  0x28(%rdi)
             │       pushq  (%rdi)
             │       push   %rax
             │     ↓ jmp    132
             │       mov    %cr3,%rdi
             │    ┌──jmp    128
             │    │  mov    %rdi,%rax
             │    │  and    $0x7ff,%rdi
             │    │  bt     %rdi,%gs:0x2219a
             │    │↓ jae    119
             │    │  btr    %rdi,%gs:0x2219a
             │    │  mov    %rax,%rdi
             │    │↓ jmp    121
             │119:│  mov    %rax,%rdi
             │    │  bts    $0x3f,%rdi
             │121:│  or     $0x800,%rdi
             │128:└─→or     $0x1000,%rdi
             │       mov    %rdi,%cr3
             │132:   pop    %rax
             │       pop    %rdi
             │       pop    %rsp
             │     → jmpq   *0x825d2d(%rip)        # ffffffff82225e88 <pv_cpu_ops+0xe8>
      
      With those at least navigating to the right destination, an improvement
      for these cases seems to be to somehow mark those inner functions,
      which in this case could be:
      
      entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
             │syscall_return_via_sysret:
             │       pop    %r15
             │       pop    %r14
             │       pop    %r13
             │       pop    %r12
             │       pop    %rbp
             │       pop    %rbx
             │       pop    %rsi
             │       pop    %r10
             │       pop    %r9
             │       pop    %r8
             │       pop    %rax
             │       pop    %rsi
             │       pop    %rdx
             │       pop    %rsi
             │       mov    %rsp,%rdi
             │       mov    %gs:0x5004,%rsp
             │       pushq  0x28(%rdi)
             │       pushq  (%rdi)
             │       push   %rax
             │     ↓ jmp    132
             │       mov    %cr3,%rdi
             │    ┌──jmp    128
             │    │  mov    %rdi,%rax
             │    │  and    $0x7ff,%rdi
             │    │  bt     %rdi,%gs:0x2219a
             │    │↓ jae    119
             │    │  btr    %rdi,%gs:0x2219a
             │    │  mov    %rax,%rdi
             │    │↓ jmp    121
             │119:│  mov    %rax,%rdi
             │    │  bts    $0x3f,%rdi
             │121:│  or     $0x800,%rdi
             │128:└─→or     $0x1000,%rdi
             │       mov    %rdi,%cr3
             │132:   pop    %rax
             │       pop    %rdi
             │       pop    %rsp
             │     → jmpq   *0x825d2d(%rip)        # ffffffff82225e88 <pv_cpu_ops+0xe8>
      
      This all gets much better viewed if one uses 'perf report --ignore-vmlinux'
      forcing the usage of /proc/kcore + /proc/kallsyms, when the above
      actually gets down to:
      
        # perf report --ignore-vmlinux
        ## do '/64', will show the function names containing '64',
        ## navigate to /entry_SYSCALL_64_after_hwframe.annotation,
        ## press 'A' to annotate, then 'P' to print that annotation
        ## to a file
        ## From another xterm (or see on screen, this 'P' thing is for
        ## getting rid of those right side scroll bars/spaces):
        # cat /entry_SYSCALL_64_after_hwframe.annotation
        entry_SYSCALL_64_after_hwframe() /proc/kcore
        Event: cycles:ppp
      
        Percent
                    Disassembly of section load0:
      
                    ffffffff9aa00044 <load0>:
         11.97        push   %rax
          4.85        push   %rdi
                      push   %rsi
          2.59        push   %rdx
          2.27        push   %rcx
          0.32        pushq  $0xffffffffffffffda
          1.29        push   %r8
                      xor    %r8d,%r8d
          1.62        push   %r9
          0.65        xor    %r9d,%r9d
          1.62        push   %r10
                      xor    %r10d,%r10d
          5.50        push   %r11
                      xor    %r11d,%r11d
          3.56        push   %rbx
                      xor    %ebx,%ebx
          4.21        push   %rbp
                      xor    %ebp,%ebp
          2.59        push   %r12
          0.97        xor    %r12d,%r12d
          3.24        push   %r13
                      xor    %r13d,%r13d
          2.27        push   %r14
                      xor    %r14d,%r14d
          4.21        push   %r15
                      xor    %r15d,%r15d
          0.97        mov    %rsp,%rdi
          5.50      → callq  do_syscall_64
         14.56        mov    0x58(%rsp),%rcx
          7.44        mov    0x80(%rsp),%r11
          0.32        cmp    %rcx,%r11
                    → jne    swapgs_restore_regs_and_return_to_usermode
          0.32        shl    $0x10,%rcx
          0.32        sar    $0x10,%rcx
          3.24        cmp    %rcx,%r11
                    → jne    swapgs_restore_regs_and_return_to_usermode
          2.27        cmpq   $0x33,0x88(%rsp)
          1.29      → jne    swapgs_restore_regs_and_return_to_usermode
                      mov    0x30(%rsp),%r11
          8.74        cmp    %r11,0x90(%rsp)
                    → jne    swapgs_restore_regs_and_return_to_usermode
          0.32        test   $0x10100,%r11
                    → jne    swapgs_restore_regs_and_return_to_usermode
          0.32        cmpq   $0x2b,0xa0(%rsp)
          0.65      → jne    swapgs_restore_regs_and_return_to_usermode
      
      I.e. using kallsyms makes the function start/end be done differently
      than using what is in the vmlinux ELF symtab and actually the hits
      go to entry_SYSCALL_64_after_hwframe, which is a GLOBAL() after the
      start of entry_SYSCALL_64:
      
        ENTRY(entry_SYSCALL_64)
                UNWIND_HINT_EMPTY
        <SNIP>
                pushq   $__USER_CS                      /* pt_regs->cs */
                pushq   %rcx                            /* pt_regs->ip */
        GLOBAL(entry_SYSCALL_64_after_hwframe)
                pushq   %rax                            /* pt_regs->orig_ax */
      
                PUSH_AND_CLEAR_REGS rax=$-ENOSYS
      
      And it goes and ends at:
      
                cmpq    $__USER_DS, SS(%rsp)            /* SS must match SYSRET */
                jne     swapgs_restore_regs_and_return_to_usermode
      
                /*
                 * We win! This label is here just for ease of understanding
                 * perf profiles. Nothing jumps here.
                 */
        syscall_return_via_sysret:
                /* rcx and r11 are already restored (see code above) */
                UNWIND_HINT_EMPTY
                POP_REGS pop_rdi=0 skip_r11rcx=1
      
      So perhaps some people should really just play with '--ignore-vmlinux'
      to force /proc/kcore + kallsyms.
      
      One idea is to do both, i.e. have a vmlinux annotation and a
      kcore+kallsyms one, when possible, and even show the patched location,
      etc.
      Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Adrian Hunter <adrian.hunter@intel.com>
      Cc: Andi Kleen <ak@linux.intel.com>
      Cc: David Ahern <dsahern@gmail.com>
      Cc: Jin Yao <yao.jin@linux.intel.com>
      Cc: Jiri Olsa <jolsa@kernel.org>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Wang Nan <wangnan0@huawei.com>
      Link: https://lkml.kernel.org/n/tip-r11knxv8voesav31xokjiuo6@git.kernel.org
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    • perf annotate: Defer searching for comma in raw line till it is needed · c448234c
      Authored by Arnaldo Carvalho de Melo
      That strchr() in jump__scnprintf() needs to be nuked somehow, as it,
      IIRC, is already done in jump__parse() and, if needed at scnprintf() time,
      should be stashed in the struct filled in at parse() time.
      
      For now just defer it to just before where it is used.
      
      Cc: Adrian Hunter <adrian.hunter@intel.com>
      Cc: Andi Kleen <ak@linux.intel.com>
      Cc: David Ahern <dsahern@gmail.com>
      Cc: Jin Yao <yao.jin@linux.intel.com>
      Cc: Jiri Olsa <jolsa@kernel.org>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Wang Nan <wangnan0@huawei.com>
      Link: https://lkml.kernel.org/n/tip-j0t5hagnphoz9xw07bh3ha3g@git.kernel.org
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    • perf annotate: Support jumping from one function to another · e4cc91b8
      Authored by Arnaldo Carvalho de Melo
      For instance:
      
        entry_SYSCALL_64  /lib/modules/4.16.0-rc5-00086-gdf09348f/build/vmlinux
          5.50 │     → callq  do_syscall_64
         14.56 │       mov    0x58(%rsp),%rcx
          7.44 │       mov    0x80(%rsp),%r11
          0.32 │       cmp    %rcx,%r11
               │     → jne    swapgs_restore_regs_and_return_to_usermode
          0.32 │       shl    $0x10,%rcx
          0.32 │       sar    $0x10,%rcx
          3.24 │       cmp    %rcx,%r11
               │     → jne    swapgs_restore_regs_and_return_to_usermode
          2.27 │       cmpq   $0x33,0x88(%rsp)
          1.29 │     → jne    swapgs_restore_regs_and_return_to_usermode
               │       mov    0x30(%rsp),%r11
          8.74 │       cmp    %r11,0x90(%rsp)
               │     → jne    swapgs_restore_regs_and_return_to_usermode
          0.32 │       test   $0x10100,%r11
               │     → jne    swapgs_restore_regs_and_return_to_usermode
          0.32 │       cmpq   $0x2b,0xa0(%rsp)
          0.65 │     → jne    swapgs_restore_regs_and_return_to_usermode
      
      It'll behave just like a "call" instruction, i.e. press enter or right
      arrow over one such line and the browser will navigate to the annotated
      disassembly of that function, which, when exited via the left arrow or Esc,
      will come back to the calling function.
      
      Now to support jumping to an offset in a different function...
      Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
      Cc: Adrian Hunter <adrian.hunter@intel.com>
      Cc: Andi Kleen <ak@linux.intel.com>
      Cc: David Ahern <dsahern@gmail.com>
      Cc: Jin Yao <yao.jin@linux.intel.com>
      Cc: Jiri Olsa <jolsa@kernel.org>
      Cc: Namhyung Kim <namhyung@kernel.org>
      Cc: Wang Nan <wangnan0@huawei.com>
      Link: https://lkml.kernel.org/n/tip-78o508mqvr8inhj63ddtw7mo@git.kernel.org
      Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>