1. 15 6月, 2017 1 次提交
  2. 01 6月, 2017 1 次提交
  3. 26 5月, 2017 1 次提交
  4. 03 5月, 2017 3 次提交
    • G
      test_bpf: Use ULL suffix for 64-bit constants · 86f8e247
      Geert Uytterhoeven 提交于
      On 32-bit:
      
          lib/test_bpf.c:4772: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4772: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4773: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4773: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4787: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4787: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4801: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4801: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4802: warning: integer constant is too large for ‘unsigned long’ type
          lib/test_bpf.c:4802: warning: integer constant is too large for ‘unsigned long’ type
      
      On 32-bit systems, "long" is only 32-bit.
      Replace the "UL" suffix by "ULL" to fix this.
      
      Fixes: 85f68fe8 ("bpf, arm64: implement jiting of BPF_XADD")
      Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
      Acked-by: Daniel Borkmann <daniel@iogearbox.net>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      86f8e247
    • D
      bpf, arm64: fix jit branch offset related to ldimm64 · ddc665a4
      Daniel Borkmann 提交于
      When the instruction right before the branch destination is
      a 64 bit load immediate, we currently calculate the wrong
      jump offset in the ctx->offset[] array as we only account
      one instruction slot for the 64 bit load immediate although
      it uses two BPF instructions. Fix it up by setting the offset
      into the right slot after we incremented the index.
      
      Before (ldimm64 test 1):
      
        [...]
        00000020:  52800007  mov w7, #0x0 // #0
        00000024:  d2800060  mov x0, #0x3 // #3
        00000028:  d2800041  mov x1, #0x2 // #2
        0000002c:  eb01001f  cmp x0, x1
        00000030:  54ffff82  b.cs 0x00000020
        00000034:  d29fffe7  mov x7, #0xffff // #65535
        00000038:  f2bfffe7  movk x7, #0xffff, lsl #16
        0000003c:  f2dfffe7  movk x7, #0xffff, lsl #32
        00000040:  f2ffffe7  movk x7, #0xffff, lsl #48
        00000044:  d29dddc7  mov x7, #0xeeee // #61166
        00000048:  f2bdddc7  movk x7, #0xeeee, lsl #16
        0000004c:  f2ddddc7  movk x7, #0xeeee, lsl #32
        00000050:  f2fdddc7  movk x7, #0xeeee, lsl #48
        [...]
      
      After (ldimm64 test 1):
      
        [...]
        00000020:  52800007  mov w7, #0x0 // #0
        00000024:  d2800060  mov x0, #0x3 // #3
        00000028:  d2800041  mov x1, #0x2 // #2
        0000002c:  eb01001f  cmp x0, x1
        00000030:  540000a2  b.cs 0x00000044
        00000034:  d29fffe7  mov x7, #0xffff // #65535
        00000038:  f2bfffe7  movk x7, #0xffff, lsl #16
        0000003c:  f2dfffe7  movk x7, #0xffff, lsl #32
        00000040:  f2ffffe7  movk x7, #0xffff, lsl #48
        00000044:  d29dddc7  mov x7, #0xeeee // #61166
        00000048:  f2bdddc7  movk x7, #0xeeee, lsl #16
        0000004c:  f2ddddc7  movk x7, #0xeeee, lsl #32
        00000050:  f2fdddc7  movk x7, #0xeeee, lsl #48
        [...]
      
      Also, add a couple of test cases to make sure JITs pass
      this test. Tested on Cavium ThunderX ARMv8. The added
      test cases all pass after the fix.
      
      Fixes: 8eee539d ("arm64: bpf: fix out-of-bounds read in bpf2a64_offset()")
      Reported-by: David S. Miller <davem@davemloft.net>
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Acked-by: Alexei Starovoitov <ast@kernel.org>
      Cc: Xi Wang <xi.wang@gmail.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      ddc665a4
    • D
      bpf, arm64: implement jiting of BPF_XADD · 85f68fe8
      Daniel Borkmann 提交于
      This work adds BPF_XADD for BPF_W/BPF_DW to the arm64 JIT and therefore
      completes JITing of all BPF instructions, meaning we can thus also remove
      the 'notyet' label and do not need to fall back to the interpreter when
      BPF_XADD is used in a program!
      
      This now also brings arm64 JIT in line with x86_64, s390x, ppc64, sparc64,
      where all current eBPF features are supported.
      
      BPF_W example from test_bpf:
      
        .u.insns_int = {
          BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
          BPF_ST_MEM(BPF_W, R10, -40, 0x10),
          BPF_STX_XADD(BPF_W, R10, R0, -40),
          BPF_LDX_MEM(BPF_W, R0, R10, -40),
          BPF_EXIT_INSN(),
        },
      
        [...]
        00000020:  52800247  mov w7, #0x12 // #18
        00000024:  928004eb  mov x11, #0xffffffffffffffd8 // #-40
        00000028:  d280020a  mov x10, #0x10 // #16
        0000002c:  b82b6b2a  str w10, [x25,x11]
        // start of xadd mapping:
        00000030:  928004ea  mov x10, #0xffffffffffffffd8 // #-40
        00000034:  8b19014a  add x10, x10, x25
        00000038:  f9800151  prfm pstl1strm, [x10]
        0000003c:  885f7d4b  ldxr w11, [x10]
        00000040:  0b07016b  add w11, w11, w7
        00000044:  880b7d4b  stxr w11, w11, [x10]
        00000048:  35ffffab  cbnz w11, 0x0000003c
        // end of xadd mapping:
        [...]
      
      BPF_DW example from test_bpf:
      
        .u.insns_int = {
          BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
          BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
          BPF_STX_XADD(BPF_DW, R10, R0, -40),
          BPF_LDX_MEM(BPF_DW, R0, R10, -40),
          BPF_EXIT_INSN(),
        },
      
        [...]
        00000020:  52800247  mov w7,  #0x12 // #18
        00000024:  928004eb  mov x11, #0xffffffffffffffd8 // #-40
        00000028:  d280020a  mov x10, #0x10 // #16
        0000002c:  f82b6b2a  str x10, [x25,x11]
        // start of xadd mapping:
        00000030:  928004ea  mov x10, #0xffffffffffffffd8 // #-40
        00000034:  8b19014a  add x10, x10, x25
        00000038:  f9800151  prfm pstl1strm, [x10]
        0000003c:  c85f7d4b  ldxr x11, [x10]
        00000040:  8b07016b  add x11, x11, x7
        00000044:  c80b7d4b  stxr w11, x11, [x10]
        00000048:  35ffffab  cbnz w11, 0x0000003c
        // end of xadd mapping:
        [...]
      
      Tested on Cavium ThunderX ARMv8, test suite results after the patch:
      
        No JIT:   [ 3751.855362] test_bpf: Summary: 311 PASSED, 0 FAILED, [0/303 JIT'ed]
        With JIT: [ 3573.759527] test_bpf: Summary: 311 PASSED, 0 FAILED, [303/303 JIT'ed]
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Acked-by: Alexei Starovoitov <ast@kernel.org>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      85f68fe8
  5. 21 10月, 2016 1 次提交
    • D
      bpf, test: fix ld_abs + vlan push/pop stress test · 0d906b1e
      Daniel Borkmann 提交于
      After commit 636c2628 ("net: skbuff: Remove errornous length
      validation in skb_vlan_pop()") mentioned test case stopped working,
      throwing a -12 (ENOMEM) return code. The issue however is not due to
      636c2628, but rather due to a buggy test case that got uncovered
      from the change in behaviour in 636c2628.
      
      The data_size of that test case for the skb was set to 1. In the
      bpf_fill_ld_abs_vlan_push_pop() handler bpf insns are generated that
      loop with: reading skb data, pushing 68 tags, reading skb data,
      popping 68 tags, reading skb data, etc, in order to force a skb
      expansion and thus trigger that JITs recache skb->data. Problem is
      that initial data_size is too small.
      
      While before 636c2628, the test silently bailed out due to the
      skb->len < VLAN_ETH_HLEN check with returning 0, and now throwing an
      error from failing skb_ensure_writable(). Set at least minimum of
      ETH_HLEN as an initial length so that on first push of data, equivalent
      pop will succeed.
      
      Fixes: 4d9c5c53 ("test_bpf: add bpf_skb_vlan_push/pop() tests")
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Acked-by: Alexei Starovoitov <ast@kernel.org>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      0d906b1e
  6. 16 9月, 2016 1 次提交
  7. 17 5月, 2016 1 次提交
  8. 07 4月, 2016 4 次提交
  9. 19 12月, 2015 1 次提交
    • D
      bpf, test: add couple of test cases · 9dd2af83
      Daniel Borkmann 提交于
      Add couple of test cases for interpreter but also JITs, f.e. to test that
      when imm32 moves are being done, upper 32bits of the regs are being zero
      extended.
      
      Without JIT:
      
        [...]
        [ 1114.129301] test_bpf: #43 MOV REG64 jited:0 128 PASS
        [ 1114.130626] test_bpf: #44 MOV REG32 jited:0 139 PASS
        [ 1114.132055] test_bpf: #45 LD IMM64 jited:0 124 PASS
        [...]
      
      With JIT (generated code can as usual be nicely verified with the help of
      bpf_jit_disasm tool):
      
        [...]
        [ 1062.726782] test_bpf: #43 MOV REG64 jited:1 6 PASS
        [ 1062.726890] test_bpf: #44 MOV REG32 jited:1 6 PASS
        [ 1062.726993] test_bpf: #45 LD IMM64 jited:1 6 PASS
        [...]
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Acked-by: Alexei Starovoitov <ast@kernel.org>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      9dd2af83
  10. 05 11月, 2015 1 次提交
  11. 07 8月, 2015 6 次提交
  12. 31 7月, 2015 1 次提交
  13. 21 7月, 2015 1 次提交
    • A
      test_bpf: add bpf_skb_vlan_push/pop() tests · 4d9c5c53
      Alexei Starovoitov 提交于
      improve accuracy of timing in test_bpf and add two stress tests:
      - {skb->data[0], get_smp_processor_id} repeated 2k times
      - {skb->data[0], vlan_push} x 68 followed by {skb->data[0], vlan_pop} x 68
      
      1st test is useful to test performance of JIT implementation of BPF_LD_ABS
      together with BPF_CALL instructions.
      2nd test is stressing skb_vlan_push/pop logic together with skb->data access
      via BPF_LD_ABS insn which checks that re-caching of skb->data is done correctly.
      
      In order to call bpf_skb_vlan_push() from test_bpf.ko have to add
      three export_symbol_gpl.
      Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      4d9c5c53
  14. 09 7月, 2015 1 次提交
  15. 28 5月, 2015 1 次提交
    • D
      test_bpf: add similarly conflicting jump test case only for classic · bde28bc6
      Daniel Borkmann 提交于
      While 3b529602 ("test_bpf: add more eBPF jump torture cases")
      added the int3 bug test case only for eBPF, which needs exactly 11
      passes to converge, here's a version for classic BPF with 11 passes,
      and one that would need 70 passes on x86_64 to actually converge for
      being successfully JITed. Effectively, all jumps are being optimized
      out resulting in a JIT image of just 89 bytes (from originally max
      BPF insns), only returning K.
      
      Might be useful as a recipe for folks wanting to craft a test case
      when backporting the fix in commit 3f7352bf ("x86: bpf_jit: fix
      compilation of large bpf programs") while not having eBPF. The 2nd
      one is delegated to the interpreter as the last pass still results
      in shrinking, in other words, this one won't be JITed on x86_64.
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Acked-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      bde28bc6
  16. 25 5月, 2015 1 次提交
  17. 23 5月, 2015 1 次提交
  18. 15 5月, 2015 2 次提交
    • M
      test_bpf: fix sparse warnings · 56cbaa45
      Michael Holzheu 提交于
      Fix several sparse warnings like:
      lib/test_bpf.c:1824:25: sparse: constant 4294967295 is so big it is long
      lib/test_bpf.c:1878:25: sparse: constant 0x0000ffffffff0000 is so big it is long
      
      Fixes: cffc642d ("test_bpf: add 173 new testcases for eBPF")
      Reported-by: Fengguang Wu <fengguang.wu@intel.com>
      Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
      Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
      Acked-by: Daniel Borkmann <daniel@iogearbox.net>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      56cbaa45
    • D
      test_bpf: add tests related to BPF_MAXINSNS · a4afd37b
      Daniel Borkmann 提交于
      Couple of torture test cases related to the bug fixed in 0b59d880
      ("ARM: net: delegate filter to kernel interpreter when imm_offset()
      return value can't fit into 12bits.").
      
      I've added a helper to allocate and fill the insn space. Output on
      x86_64 from my laptop:
      
      test_bpf: #233 BPF_MAXINSNS: Maximum possible literals jited:0 7 PASS
      test_bpf: #234 BPF_MAXINSNS: Single literal jited:0 8 PASS
      test_bpf: #235 BPF_MAXINSNS: Run/add until end jited:0 11553 PASS
      test_bpf: #236 BPF_MAXINSNS: Too many instructions PASS
      test_bpf: #237 BPF_MAXINSNS: Very long jump jited:0 9 PASS
      test_bpf: #238 BPF_MAXINSNS: Ctx heavy transformations jited:0 20329 20398 PASS
      test_bpf: #239 BPF_MAXINSNS: Call heavy transformations jited:0 32178 32475 PASS
      test_bpf: #240 BPF_MAXINSNS: Jump heavy test jited:0 10518 PASS
      
      test_bpf: #233 BPF_MAXINSNS: Maximum possible literals jited:1 4 PASS
      test_bpf: #234 BPF_MAXINSNS: Single literal jited:1 4 PASS
      test_bpf: #235 BPF_MAXINSNS: Run/add until end jited:1 1625 PASS
      test_bpf: #236 BPF_MAXINSNS: Too many instructions PASS
      test_bpf: #237 BPF_MAXINSNS: Very long jump jited:1 8 PASS
      test_bpf: #238 BPF_MAXINSNS: Ctx heavy transformations jited:1 3301 3174 PASS
      test_bpf: #239 BPF_MAXINSNS: Call heavy transformations jited:1 24107 23491 PASS
      test_bpf: #240 BPF_MAXINSNS: Jump heavy test jited:1 8651 PASS
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Cc: Alexei Starovoitov <ast@plumgrid.com>
      Cc: Nicolas Schichan <nschichan@freebox.fr>
      Acked-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      a4afd37b
  19. 13 5月, 2015 1 次提交
  20. 11 5月, 2015 1 次提交
  21. 01 5月, 2015 1 次提交
    • D
      test_bpf: indicate whether bpf prog got jited in test suite · 327941f8
      Daniel Borkmann 提交于
      I think this is useful to verify whether a filter could be JITed or
      not in case of bpf_prog_enable >= 1, which otherwise the test suite
      doesn't tell besides taking a good peek at the performance numbers.
      
      Nicolas Schichan reported a bug in the ARM JIT compiler that rejected
      and waved the filter to the interpreter although it shouldn't have.
      Nevertheless, the test passes as expected, but such information is
      not visible.
      
      It's i.e. useful for the remaining classic JITs, but also for
      implementing remaining opcodes that are not yet present in eBPF JITs
      (e.g. ARM64 waves some of them to the interpreter). This minor patch
      allows to grep through dmesg to find those accordingly, but also
      provides a total summary, i.e.: [<X>/53 JIT'ed]
      
        # echo 1 > /proc/sys/net/core/bpf_jit_enable
        # insmod lib/test_bpf.ko
        # dmesg | grep "jited:0"
      
      dmesg example on the ARM issue with JIT rejection:
      
      [...]
      [   67.925387] test_bpf: #2 ADD_SUB_MUL_K jited:1 24 PASS
      [   67.930889] test_bpf: #3 DIV_MOD_KX jited:0 794 PASS
      [   67.943940] test_bpf: #4 AND_OR_LSH_K jited:1 20 20 PASS
      [...]
      Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
      Cc: Nicolas Schichan <nschichan@freebox.fr>
      Cc: Alexei Starovoitov <ast@plumgrid.com>
      Acked-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      327941f8
  22. 09 12月, 2014 1 次提交
  23. 31 10月, 2014 1 次提交
  24. 23 9月, 2014 1 次提交
  25. 10 9月, 2014 1 次提交
    • A
      net: filter: add "load 64-bit immediate" eBPF instruction · 02ab695b
      Alexei Starovoitov 提交于
      add BPF_LD_IMM64 instruction to load 64-bit immediate value into a register.
      All previous instructions were 8-byte. This is first 16-byte instruction.
      Two consecutive 'struct bpf_insn' blocks are interpreted as single instruction:
      insn[0].code = BPF_LD | BPF_DW | BPF_IMM
      insn[0].dst_reg = destination register
      insn[0].imm = lower 32-bit
      insn[1].code = 0
      insn[1].imm = upper 32-bit
      All unused fields must be zero.
      
      Classic BPF has similar instruction: BPF_LD | BPF_W | BPF_IMM
      which loads 32-bit immediate value into a register.
      
      x64 JITs it as single 'movabsq %rax, imm64'
      arm64 may JIT as sequence of four 'movk x0, #imm16, lsl #shift' insn
      
      Note that old eBPF programs are binary compatible with new interpreter.
      
      It helps eBPF programs load 64-bit constant into a register with one
      instruction instead of using two registers and 4 instructions:
      BPF_MOV32_IMM(R1, imm32)
      BPF_ALU64_IMM(BPF_LSH, R1, 32)
      BPF_MOV32_IMM(R2, imm32)
      BPF_ALU64_REG(BPF_OR, R1, R2)
      
      User space generated programs will use this instruction to load constants only.
      
      To tell kernel that user space needs a pointer the _pseudo_ variant of
      this instruction may be added later, which will use extra bits of encoding
      to indicate what type of pointer user space is asking kernel to provide.
      For example 'off' or 'src_reg' fields can be used for such purpose.
      src_reg = 1 could mean that user space is asking kernel to validate and
      load in-kernel map pointer.
      src_reg = 2 could mean that user space needs readonly data section pointer
      src_reg = 3 could mean that user space needs a pointer to per-cpu local data
      All such future pseudo instructions will not be carrying the actual pointer
      as part of the instruction, but rather will be treated as a request to kernel
      to provide one. The kernel will verify the request_for_a_pointer, then
      will drop _pseudo_ marking and will store actual internal pointer inside
      the instruction, so the end result is the interpreter and JITs never
      see pseudo BPF_LD_IMM64 insns and only operate on generic BPF_LD_IMM64 that
      loads 64-bit immediate into a register. User space never operates on direct
      pointers and verifier can easily recognize request_for_pointer vs other
      instructions.
      Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      02ab695b
  26. 06 9月, 2014 1 次提交
    • D
      net: bpf: make eBPF interpreter images read-only · 60a3b225
      Daniel Borkmann 提交于
      With eBPF getting more extended and exposure to user space is on it's way,
      hardening the memory range the interpreter uses to steer its command flow
      seems appropriate.  This patch moves the to be interpreted bytecode to
      read-only pages.
      
      In case we execute a corrupted BPF interpreter image for some reason e.g.
      caused by an attacker which got past a verifier stage, it would not only
      provide arbitrary read/write memory access but arbitrary function calls
      as well. After setting up the BPF interpreter image, its contents do not
      change until destruction time, thus we can setup the image on immutable
      made pages in order to mitigate modifications to that code. The idea
      is derived from commit 314beb9b ("x86: bpf_jit_comp: secure bpf jit
      against spraying attacks").
      
      This is possible because bpf_prog is not part of sk_filter anymore.
      After setup bpf_prog cannot be altered during its life-time. This prevents
      any modifications to the entire bpf_prog structure (incl. function/JIT
      image pointer).
      
      Every eBPF program (including classic BPF that are migrated) have to call
      bpf_prog_select_runtime() to select either interpreter or a JIT image
      as a last setup step, and they all are being freed via bpf_prog_free(),
      including non-JIT. Therefore, we can easily integrate this into the
      eBPF life-time, plus since we directly allocate a bpf_prog, we have no
      performance penalty.
      
      Tested with seccomp and test_bpf testsuite in JIT/non-JIT mode and manual
      inspection of kernel_page_tables.  Brad Spengler proposed the same idea
      via Twitter during development of this patch.
      
      Joint work with Hannes Frederic Sowa.
      Suggested-by: Brad Spengler <spender@grsecurity.net>
      Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
      Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
      Cc: Alexei Starovoitov <ast@plumgrid.com>
      Cc: Kees Cook <keescook@chromium.org>
      Acked-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      60a3b225
  27. 26 8月, 2014 1 次提交
  28. 03 8月, 2014 1 次提交
    • A
      net: filter: split 'struct sk_filter' into socket and bpf parts · 7ae457c1
      Alexei Starovoitov 提交于
      clean up names related to socket filtering and bpf in the following way:
      - everything that deals with sockets keeps 'sk_*' prefix
      - everything that is pure BPF is changed to 'bpf_*' prefix
      
      split 'struct sk_filter' into
      struct sk_filter {
      	atomic_t        refcnt;
      	struct rcu_head rcu;
      	struct bpf_prog *prog;
      };
      and
      struct bpf_prog {
              u32                     jited:1,
                                      len:31;
              struct sock_fprog_kern  *orig_prog;
              unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                                  const struct bpf_insn *filter);
              union {
                      struct sock_filter      insns[0];
                      struct bpf_insn         insnsi[0];
                      struct work_struct      work;
              };
      };
      so that 'struct bpf_prog' can be used independent of sockets and cleans up
      'unattached' bpf use cases
      
      split SK_RUN_FILTER macro into:
          SK_RUN_FILTER to be used with 'struct sk_filter *' and
          BPF_PROG_RUN to be used with 'struct bpf_prog *'
      
      __sk_filter_release(struct sk_filter *) gains
      __bpf_prog_release(struct bpf_prog *) helper function
      
      also perform related renames for the functions that work
      with 'struct bpf_prog *', since they're on the same lines:
      
      sk_filter_size -> bpf_prog_size
      sk_filter_select_runtime -> bpf_prog_select_runtime
      sk_filter_free -> bpf_prog_free
      sk_unattached_filter_create -> bpf_prog_create
      sk_unattached_filter_destroy -> bpf_prog_destroy
      sk_store_orig_filter -> bpf_prog_store_orig_filter
      sk_release_orig_filter -> bpf_release_orig_filter
      __sk_migrate_filter -> bpf_migrate_filter
      __sk_prepare_filter -> bpf_prepare_filter
      
      API for attaching classic BPF to a socket stays the same:
      sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
      and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
      which is used by sockets, tun, af_packet
      
      API for 'unattached' BPF programs becomes:
      bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
      and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
      which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf
      Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      7ae457c1
  29. 25 7月, 2014 1 次提交