Commit 10a3b7c1 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-08-15

The following pull-request contains BPF updates for your *net* tree.

We've added 23 non-merge commits during the last 4 day(s) which contain
a total of 32 files changed, 421 insertions(+), 141 deletions(-).

The main changes are:

1) Fix sock_ops ctx access splat due to register override, from John Fastabend.

2) Batch of various fixes to libbpf, bpftool, and selftests when testing build
   in 32-bit mode, from Andrii Nakryiko.

3) Fix vmlinux.h generation on ARM by mapping GCC built-in types (__Poly*_t)
   to equivalent ones clang can work with, from Jean-Philippe Brucker.

4) Fix build_id lookup in bpf_get_stackid() helper by walking all NOTE ELF
   sections instead of just first, from Jiri Olsa.

5) Avoid use of __builtin_offsetof() in libbpf for CO-RE, from Yonghong Song.

6) Fix segfault in test_mmap due to inconsistent length params, from Jianlin Lv.

7) Don't override errno in libbpf when logging errors, from Toke Høiland-Jørgensen.

8) Fix v4_to_v6 sockaddr conversion in sk_lookup test, from Stanislav Fomichev.

9) Add link to bpf-helpers(7) man page to BPF doc, from Joe Stringer.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -36,6 +36,12 @@ Two sets of Questions and Answers (Q&A) are maintained.
   bpf_devel_QA

Helper functions
================

* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs.

Program types
=============
@@ -79,4 +85,5 @@ Other
.. _networking-filter: ../networking/filter.rst
.. _man-pages: https://www.kernel.org/doc/man-pages/
.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
.. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/
@@ -213,11 +213,13 @@ static int stack_map_get_build_id_32(void *page_addr,
phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
-for (i = 0; i < ehdr->e_phnum; ++i)
-if (phdr[i].p_type == PT_NOTE)
-return stack_map_parse_build_id(page_addr, build_id,
-page_addr + phdr[i].p_offset,
-phdr[i].p_filesz);
for (i = 0; i < ehdr->e_phnum; ++i) {
if (phdr[i].p_type == PT_NOTE &&
!stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz))
return 0;
}
return -EINVAL;
}
@@ -236,11 +238,13 @@ static int stack_map_get_build_id_64(void *page_addr,
phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
-for (i = 0; i < ehdr->e_phnum; ++i)
-if (phdr[i].p_type == PT_NOTE)
-return stack_map_parse_build_id(page_addr, build_id,
-page_addr + phdr[i].p_offset,
-phdr[i].p_filesz);
for (i = 0; i < ehdr->e_phnum; ++i) {
if (phdr[i].p_type == PT_NOTE &&
!stack_map_parse_build_id(page_addr, build_id,
page_addr + phdr[i].p_offset,
phdr[i].p_filesz))
return 0;
}
return -EINVAL;
}
......
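The two hunks above stop short-circuiting on the first PT_NOTE segment: a build ID is only returned once stack_map_parse_build_id() actually finds one, otherwise the scan continues to the next note segment. A user-space sketch of the same walk (illustrative only, not code from the patch; it assumes the whole ELF image is mapped at `base`):

```c
#include <elf.h>
#include <stddef.h>
#include <string.h>

/* Scan every PT_NOTE segment for an NT_GNU_BUILD_ID note instead of
 * giving up after the first one; .note.ABI-tag and .note.gnu.build-id
 * commonly live in separate note segments.
 */
static int find_build_id(const void *base, const unsigned char **id,
			 size_t *id_len)
{
	const Elf64_Ehdr *ehdr = base;
	const Elf64_Phdr *phdr = (const void *)((const char *)base + ehdr->e_phoff);
	int i;

	for (i = 0; i < ehdr->e_phnum; i++) {
		const char *p, *end;

		if (phdr[i].p_type != PT_NOTE)
			continue;
		p = (const char *)base + phdr[i].p_offset;
		end = p + phdr[i].p_filesz;
		while (p + sizeof(Elf64_Nhdr) <= end) {
			const Elf64_Nhdr *n = (const void *)p;
			size_t namesz = (n->n_namesz + 3) & ~3UL;
			size_t descsz = (n->n_descsz + 3) & ~3UL;

			if (n->n_type == NT_GNU_BUILD_ID && n->n_namesz == 4 &&
			    !memcmp(p + sizeof(*n), "GNU", 4)) {
				*id = (const unsigned char *)p + sizeof(*n) + namesz;
				*id_len = n->n_descsz;
				return 0;	/* found it, stop here */
			}
			p += sizeof(*n) + namesz + descsz;
		}
	}
	return -1;	/* no PT_NOTE segment carried a build ID */
}
```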
@@ -8913,10 +8913,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
NL_SET_ERR_MSG(extack, "Active program does not match expected");
return -EEXIST;
}
-if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
-NL_SET_ERR_MSG(extack, "XDP program already attached");
-return -EBUSY;
-}
/* put effective new program into new_prog */
if (link)
@@ -8927,6 +8923,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
? XDP_MODE_DRV : XDP_MODE_SKB;
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
}
if (!offload && dev_xdp_prog(dev, other_mode)) {
NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
return -EEXIST;
......
@@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \
sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
fullsock_reg = reg; \
jmp += 2; \
} \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
-si->dst_reg, si->src_reg, \
fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
-*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
if (si->dst_reg == si->src_reg) \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
@@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
OBJ_FIELD), \
si->dst_reg, si->dst_reg, \
offsetof(OBJ, OBJ_FIELD)); \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
} \
} while (0)
#define SOCK_OPS_GET_SK() \
do { \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == reg || si->src_reg == reg) \
reg--; \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
fullsock_reg = reg; \
jmp += 2; \
} \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, \
is_fullsock), \
fullsock_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
is_fullsock)); \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
if (si->dst_reg == si->src_reg) \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
struct bpf_sock_ops_kern, sk),\
si->dst_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, sk));\
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
} \
} while (0)
#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
break;
case offsetof(struct bpf_sock_ops, sk):
-*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-struct bpf_sock_ops_kern,
-is_fullsock),
-si->dst_reg, si->src_reg,
-offsetof(struct bpf_sock_ops_kern,
-is_fullsock));
-*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
-*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-struct bpf_sock_ops_kern, sk),
-si->dst_reg, si->src_reg,
-offsetof(struct bpf_sock_ops_kern, sk));
SOCK_OPS_GET_SK();
break;
}
return insn - insn_buf;
......
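The situation the new spill/fill handles is a program that dereferences the context through a single register, so that dst_reg == src_reg when the verifier rewrites the access. A reduced sketch of such a program, mirroring the inline-asm checks added to the sockops selftest further down (the 184/4 offsets are the ones that selftest uses; treat the snippet as illustrative, not as part of the patch):

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int single_reg_sk_read(struct bpf_sock_ops *skops)
{
	/* Load skops->sk and then a field of the socket using r9 for both
	 * the context pointer and the destination. Before this fix, the
	 * multi-instruction rewrite emitted by SOCK_OPS_GET_SK() could
	 * overwrite the register holding the ctx pointer before it had
	 * finished reading through it.
	 */
	asm volatile ("r9 = %[skops];\n"
		      "r9 = *(u64 *)(r9 +184);\n"
		      "if r9 == 0 goto +1;\n"
		      "r9 = *(u32 *)(r9 +4);\n"
		      :: [skops] "r"(skops) : "r9");
	return 1;
}

char _license[] SEC("license") = "GPL";
```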
@@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
if (!info->btf_id || !info->nr_func_info ||
btf__get_from_id(info->btf_id, &prog_btf))
goto print;
-finfo = (struct bpf_func_info *)info->func_info;
finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
......
@@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
var_name, align);
return -EINVAL;
}
/* Assume 32-bit architectures when generating data section
* struct memory layout. Given bpftool can't know which target
* host architecture it's emitting skeleton for, we need to be
* conservative and assume 32-bit one to ensure enough padding
* bytes are generated for pointer and long types. This will
* still work correctly for 64-bit architectures, because in
* the worst case we'll generate unnecessary padding field,
* which on 64-bit architectures is not strictly necessary and
* would be handled by natural 8-byte alignment. But it still
* will be a correct memory layout, based on recorded offsets
* in BTF.
*/
if (align > 4)
align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
@@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct %1$s *obj; \n\
\n\
-obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
if (!obj) \n\
return NULL; \n\
if (%1$s__create_skeleton(obj)) \n\
@@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv)
{ \n\
struct bpf_object_skeleton *s; \n\
\n\
-s = (typeof(s))calloc(1, sizeof(*s)); \n\
s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
if (!s) \n\
return -1; \n\
obj->skeleton = s; \n\
@@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv)
/* maps */ \n\
s->map_cnt = %zu; \n\
s->map_skel_sz = sizeof(*s->maps); \n\
-s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
if (!s->maps) \n\
goto err; \n\
",
@@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv)
/* programs */ \n\
s->prog_cnt = %zu; \n\
s->prog_skel_sz = sizeof(*s->progs); \n\
-s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
if (!s->progs) \n\
goto err; \n\
",
......
@@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
jsonw_string_field(json_wtr, "tp_name",
-(const char *)info->raw_tracepoint.tp_name);
u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
@@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
switch (info->type) {
case BPF_LINK_TYPE_RAW_TRACEPOINT:
printf("\n\ttp '%s' ",
-(const char *)info->raw_tracepoint.tp_name);
(const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
break;
case BPF_LINK_TYPE_TRACING:
err = get_prog_info(info->prog_id, &prog_info);
......
@@ -21,7 +21,15 @@
/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64)(unsigned long)ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *)(unsigned long)ptr;
}
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
......
@@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
p_info("no instructions returned");
return -1;
}
-buf = (unsigned char *)(info->jited_prog_insns);
buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
-buf = (unsigned char *)info->xlated_prog_insns;
buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
@@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
return -1;
}
-func_info = (void *)info->func_info;
func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
@@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
n = write(fd, buf, member_len);
close(fd);
-if (n != member_len) {
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
@@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
-ksyms = (__u64 *) info->jited_ksyms;
ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
-lens = (__u32 *) info->jited_func_lens;
lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
@@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
-dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
@@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
goto out;
}
-func_info = (struct bpf_func_info *)(info_linear->info.func_info);
func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
......
@@ -40,7 +40,7 @@
 * Helper macro to manipulate data structures
 */
#ifndef offsetof
-#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member) \
......
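Both spellings of offsetof() compute the same constant; the pull-request summary ties the switch back to the null-pointer form to CO-RE, presumably because the explicit member access is friendlier to clang's relocation tracking (treat that rationale as an assumption here). A plain C illustration of the equivalence, with a made-up struct:

```c
#include <stdio.h>

struct pkt {
	int len;
	char data[64];
};

/* offsetof spelled via null-pointer arithmetic, as in the hunk above */
#define my_offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)

int main(void)
{
	/* both lines print 4: data starts right after the 4-byte len */
	printf("%lu\n", my_offsetof(struct pkt, data));
	printf("%lu\n", (unsigned long)__builtin_offsetof(struct pkt, data));
	return 0;
}
```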
@@ -41,6 +41,7 @@ struct btf {
__u32 types_size;
__u32 data_size;
int fd;
int ptr_sz;
};
static inline __u64 ptr_to_u64(const void *ptr)
@@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
return btf->types[type_id];
}
static int determine_ptr_size(const struct btf *btf)
{
const struct btf_type *t;
const char *name;
int i;
for (i = 1; i <= btf->nr_types; i++) {
t = btf__type_by_id(btf, i);
if (!btf_is_int(t))
continue;
name = btf__name_by_offset(btf, t->name_off);
if (!name)
continue;
if (strcmp(name, "long int") == 0 ||
strcmp(name, "long unsigned int") == 0) {
if (t->size != 4 && t->size != 8)
continue;
return t->size;
}
}
return -1;
}
static size_t btf_ptr_sz(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
}
/* Return pointer size this BTF instance assumes. The size is heuristically
* determined by looking for 'long' or 'unsigned long' integer type and
* recording its size in bytes. If BTF type information doesn't have any such
* type, this function returns 0. In the latter case, native architecture's
* pointer size is assumed, so will be either 4 or 8, depending on
* architecture that libbpf was compiled for. It's possible to override
* guessed value by using btf__set_pointer_size() API.
*/
size_t btf__pointer_size(const struct btf *btf)
{
if (!btf->ptr_sz)
((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
if (btf->ptr_sz < 0)
/* not enough BTF type info to guess */
return 0;
return btf->ptr_sz;
}
/* Override or set pointer size in bytes. Only values of 4 and 8 are
* supported.
*/
int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
{
if (ptr_sz != 4 && ptr_sz != 8)
return -EINVAL;
btf->ptr_sz = ptr_sz;
return 0;
}
static bool btf_type_is_void(const struct btf_type *t)
{
return t == &btf_void || btf_is_fwd(t);
@@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
size = t->size;
goto done;
case BTF_KIND_PTR:
-size = sizeof(void *);
size = btf_ptr_sz(btf);
goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
@@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
-return min(sizeof(void *), (size_t)t->size);
return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
-return sizeof(void *);
return btf_ptr_sz(btf);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
if (IS_ERR(btf))
goto done;
switch (gelf_getclass(elf)) {
case ELFCLASS32:
btf__set_pointer_size(btf, 4);
break;
case ELFCLASS64:
btf__set_pointer_size(btf, 8);
break;
default:
pr_warn("failed to get ELF class (bitness) for %s\n", path);
break;
}
if (btf_ext && btf_ext_data) {
*btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);
......
@@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
......
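A hypothetical caller of the two APIs declared above (the file name and error handling are made up; btf__parse_raw() is the raw-BTF loader exported in the same libbpf version):

```c
#include <bpf/btf.h>
#include <bpf/libbpf.h>

struct btf *load_bpf_btf(void)
{
	/* raw BTF dump: no ELF class available to fix the pointer size */
	struct btf *btf = btf__parse_raw("prog.btf");

	if (libbpf_get_error(btf))
		return NULL;

	/* btf__pointer_size() returns 0 when the heuristic finds no 'long'
	 * to guess from; BPF-targeted BTF is always 64-bit, so force 8.
	 */
	if (btf__pointer_size(btf) != 8)
		btf__set_pointer_size(btf, 8);
	return btf;
}
```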
@@ -13,6 +13,7 @@
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
@@ -60,6 +61,7 @@ struct btf_dump {
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
int ptr_sz;
bool strip_mods;
/* per-type auxiliary state */
@@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
d->opts.ctx = opts ? opts->ctx : NULL;
d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->type_names)) {
@@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
}
}
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
switch (kind) {
case BTF_KIND_INT:
/* Emit type alias definitions if necessary */
btf_dump_emit_missing_aliases(d, id, t);
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
@@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
int align, int lvl)
{
int off_diff = m_off - cur_off;
-int ptr_bits = sizeof(void *) * 8;
int ptr_bits = d->ptr_sz * 8;
if (off_diff <= 0)
/* no gap */
@@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
} else {
-m_sz = max(0, btf__resolve_size(d->btf, m->type));
m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
}
btf_dump_printf(d, ";");
@@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
btf_dump_printf(d, " __attribute__((packed))");
}
static const char *missing_base_types[][2] = {
/*
* GCC emits typedefs to its internal __PolyX_t types when compiling Arm
* SIMD intrinsics. Alias them to standard base types.
*/
{ "__Poly8_t", "unsigned char" },
{ "__Poly16_t", "unsigned short" },
{ "__Poly64_t", "unsigned long long" },
{ "__Poly128_t", "unsigned __int128" },
};
static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
const char *name = btf_dump_type_name(d, id);
int i;
for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
if (strcmp(name, missing_base_types[i][0]) == 0) {
btf_dump_printf(d, "typedef %s %s;\n\n",
missing_base_types[i][1], name);
break;
}
}
}
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
......
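In effect, a vmlinux.h generated from a GCC-built ARM kernel now starts with aliases like the following before the first use of the corresponding types, so clang can compile it (sketch of the emitted output, derived from the missing_base_types[] table above):

```c
typedef unsigned char __Poly8_t;
typedef unsigned short __Poly16_t;
typedef unsigned long long __Poly64_t;
typedef unsigned __int128 __Poly128_t;
```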
@@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
BTF_ELF_SEC, err);
goto out;
}
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
err = 0;
}
if (btf_ext_data) {
@@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (IS_ERR(kern_btf))
return PTR_ERR(kern_btf);
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
bpf_object__sanitize_btf(obj, kern_btf);
}
@@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
map = bpf_create_map_xattr(&map_attr);
if (map < 0) {
-cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
-__func__, cp, errno);
__func__, cp, -ret);
-return -errno;
return ret;
}
insns[0].imm = map;
@@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
static int bpf_object__collect_map_relos(struct bpf_object *obj,
GElf_Shdr *shdr, Elf_Data *data)
{
-int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
int i, j, nrels, new_sz;
const struct btf_var_secinfo *vi = NULL;
const struct btf_type *sec, *var, *def;
const struct btf_member *member;
@@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
vi = btf_var_secinfos(sec) + map->btf_var_idx;
if (vi->offset <= rel.r_offset &&
-rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
break;
}
if (j == obj->nr_maps) {
@@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return -EINVAL;
moff = rel.r_offset - vi->offset - moff;
-if (moff % ptr_sz)
/* here we use BPF pointer size, which is always 64 bit, as we
 * are parsing ELF that was built for BPF target
 */
if (moff % bpf_ptr_sz)
return -EINVAL;
-moff /= ptr_sz;
moff /= bpf_ptr_sz;
if (moff >= map->init_slots_sz) {
new_sz = moff + 1;
-tmp = realloc(map->init_slots, new_sz * ptr_sz);
tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
if (!tmp)
return -ENOMEM;
map->init_slots = tmp;
memset(map->init_slots + map->init_slots_sz, 0,
-(new_sz - map->init_slots_sz) * ptr_sz);
(new_sz - map->init_slots_sz) * host_ptr_sz);
map->init_slots_sz = new_sz;
}
map->init_slots[moff] = targ_map;
@@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
-cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
pr_warn("failed to pin program: %s\n", cp);
-return -errno;
return err;
}
pr_debug("pinned program '%s'\n", path);
......
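Both error-path changes above follow the same shape: snapshot errno into the return value before any logging helper runs, because pr_warn() and libbpf_strerror_r() may themselves change errno. A generic sketch of the pattern, with a made-up callback standing in for the failing libbpf call:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Capture errno before logging, then return the captured value rather
 * than re-reading errno at the end.
 */
static int pin_or_log(int (*do_pin)(const char *), const char *path)
{
	int err;

	if (do_pin(path)) {
		err = -errno;	/* snapshot the real cause first */
		fprintf(stderr, "failed to pin program: %s\n", strerror(-err));
		return err;	/* not -errno: fprintf may have clobbered it */
	}
	return 0;
}
```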
@@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
btf__pointer_size;
btf__set_fd;
btf__set_pointer_size;
} LIBBPF_0.0.9;
@@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
-link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
-link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
-strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
@@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
-(char *)link_infos[i].raw_tracepoint.tp_name,
(const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
"sys_enter"))
goto done;
......
@@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
-{"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
-{"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
-{"btf_dump: padding", "btf_dump_test_case_padding", {}},
-{"btf_dump: packing", "btf_dump_test_case_packing", {}},
-{"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
-{"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
-{"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
{"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
{"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
{"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
};
static int btf_dump_all_types(const struct btf *btf,
@@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
* do, it should be 8 regardless of host architecture, becaues BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
btf__set_pointer_size(btf, 8);
} else {
CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
8, btf__pointer_size(btf));
}
snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
......
@@ -159,8 +159,8 @@ void test_core_extern(void)
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
CHECK(got[j] != exp[j], "check_res",
-"result #%d: expected %lx, but got %lx\n",
-j, exp[j], got[j]);
"result #%d: expected %llx, but got %llx\n",
j, (__u64)exp[j], (__u64)got[j]);
}
cleanup:
test_core_extern__destroy(skel);
......
@@ -237,7 +237,7 @@
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
-.ptr_sz = sizeof(((type *)0)->ptr_field), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
.enum_sz = sizeof(((type *)0)->enum_field), \
}
@@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
-.s32 = -0x3FEDCBA987654321,
.s32 = -0x3FEDCBA987654321LL,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
-.ub1 = 0xFEDCBA9876543210,
.ub1 = 0xFEDCBA9876543210LL,
.ub2 = 0xA6,
-.ub7 = -0x7EDCBA987654321,
.ub7 = -0x7EDCBA987654321LL,
-.sb4 = -0x6123456789ABCDE,
.sb4 = -0x6123456789ABCDELL,
-.sb20 = 0xD00D,
.sb20 = 0xD00DLL,
.u32 = -0x76543,
-.s32 = 0x0ADEADBEEFBADB0B,
.s32 = 0x0ADEADBEEFBADB0BLL,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
-.ub1 = 0xF,
.ub1 = 0xFLL,
-.ub2 = 0x0812345678FEDCBA,
.ub2 = 0x0812345678FEDCBALL,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
......
@@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
__u32 duration = 0, retval;
struct bpf_map *data_map;
const int zero = 0;
-u64 *result = NULL;
__u64 *result = NULL;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
@@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
-result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
if (CHECK(!link || !prog || !result, "alloc_memory",
"failed to alloc memory"))
goto close_prog;
@@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;
for (i = 0; i < prog_cnt; i++)
-if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
result[i]))
goto close_prog;
......
@@ -591,7 +591,7 @@ void test_flow_dissector(void)
CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
err || tattr.retval != 1,
tests[i].name,
-"err %d errno %d retval %d duration %d size %u/%lu\n",
"err %d errno %d retval %d duration %d size %u/%zu\n",
err, errno, tattr.retval, tattr.duration,
tattr.data_size_out, sizeof(flow_keys));
CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
......
@@ -5,7 +5,7 @@
static void test_global_data_number(struct bpf_object *obj, __u32 duration)
{
int i, err, map_fd;
-uint64_t num;
__u64 num;
map_fd = bpf_find_map(__func__, obj, "result_number");
if (CHECK_FAIL(map_fd < 0))
@@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
struct {
char *name;
uint32_t key;
-uint64_t num;
__u64 num;
} tests[] = {
{ "relocate .bss reference", 0, 0 },
{ "relocate .data reference", 1, 42 },
@@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
CHECK(err || num != tests[i].num, tests[i].name,
-"err %d result %lx expected %lx\n",
"err %d result %llx expected %llx\n",
err, num, tests[i].num);
}
}
......
@@ -21,7 +21,7 @@ void test_mmap(void)
const long page_size = sysconf(_SC_PAGE_SIZE);
int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
struct bpf_map *data_map, *bss_map;
-void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
struct test_mmap__bss *bss_data;
struct bpf_map_info map_info;
__u32 map_info_sz = sizeof(map_info);
@@ -183,16 +183,23 @@ void test_mmap(void)
/* check some more advanced mmap() manipulations */
tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
-1, 0);
if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
goto cleanup;
/* map all but last page: pages 1-3 mapped */
-tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
-if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
munmap(tmp0, 4 * page_size);
goto cleanup;
}
/* unmap second page: pages 1, 3 mapped */
err = munmap(tmp1 + page_size, page_size);
if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
-munmap(tmp1, map_sz);
munmap(tmp1, 4 * page_size);
goto cleanup;
}
@@ -201,7 +208,7 @@ void test_mmap(void)
MAP_SHARED | MAP_FIXED, data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
munmap(tmp1, page_size);
-munmap(tmp1 + 2*page_size, page_size);
munmap(tmp1 + 2*page_size, 2 * page_size);
goto cleanup;
}
CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@@ -211,7 +218,7 @@ void test_mmap(void)
tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, 0);
if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
-munmap(tmp1, 3 * page_size); /* unmap page 1 */
munmap(tmp1, 4 * page_size); /* unmap page 1 */
goto cleanup;
}
CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
......
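The reworked test first reserves a single 4-page anonymous window and then overlays the real mapping into it with MAP_FIXED, so every later munmap()/mmap(MAP_FIXED) length stays inside one consistently sized region instead of mixing 3-page and 4-page sizes. A tiny standalone demonstration of the same reserve-then-overlay pattern (anonymous memory here; the selftest maps the BPF array map fd instead):

```c
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	void *win, *map;

	/* reserve a 4-page window up front ... */
	win = mmap(NULL, 4 * page_size, PROT_NONE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (win == MAP_FAILED)
		return 1;

	/* ... then place the real mapping inside it with MAP_FIXED, so all
	 * later munmap()/mmap(MAP_FIXED) lengths stay within the window
	 */
	map = mmap(win, 3 * page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (map == MAP_FAILED)
		return 1;

	((char *)map)[0] = 1;
	printf("window %p, mapping %p\n", win, map);
	munmap(win, 4 * page_size);
	return 0;
}
```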
@@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
-"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow",
......
@@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss)
v6->sin6_addr.s6_addr[10] = 0xff;
v6->sin6_addr.s6_addr[11] = 0xff;
memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
memset(&v6->sin6_addr.s6_addr[0], 0, 10);
}
static int udp_recv_send(int server_fd)
......
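The added memset() supplies the ten leading zero bytes of the IPv4-mapped IPv6 address (::ffff:a.b.c.d) that the conversion previously left holding whatever the reused sockaddr_storage contained. A self-contained sketch of the same construction:

```c
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Build ::ffff:a.b.c.d from an IPv4 address, including the leading zero
 * bytes the patch adds to v4_to_v6().
 */
static void v4_mapped(struct in6_addr *v6, struct in_addr v4)
{
	memset(&v6->s6_addr[0], 0, 10);
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4.s_addr, 4);
}

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "127.0.0.1", &v4);
	v4_mapped(&v6, v4);
	printf("%s\n", inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));	/* ::ffff:127.0.0.1 */
	return 0;
}
```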
@@ -81,7 +81,7 @@ void test_skb_ctx(void)
CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
"ctx_size_out",
-"incorrect output size, want %lu have %u\n",
"incorrect output size, want %zu have %u\n",
sizeof(skb), tattr.ctx_size_out);
for (i = 0; i < 5; i++)
......
@@ -44,25 +44,25 @@ void test_varlen(void)
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
-"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
-"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
-"doesn't match!");
"doesn't match!\n");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
-"doesn't match!");
"doesn't match!\n");
cleanup:
test_varlen__destroy(skel);
}
#include <stdint.h>
#include <stdbool.h>
void preserce_ptr_sz_fn(long x) {}
#define __bpf_aligned __attribute__((aligned(8)))
/*
 * KERNEL
 */
@@ -444,51 +449,51 @@ struct core_reloc_primitives {
char a;
int b;
enum core_reloc_primitives_enum c;
-void *d;
void *d __bpf_aligned;
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___diff_enum_def {
char a;
int b;
-void *d;
void *d __bpf_aligned;
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
enum {
X = 100,
Y = 200,
-} c; /* inline enum def with differing set of values */
} c __bpf_aligned; /* inline enum def with differing set of values */
};
struct core_reloc_primitives___diff_func_proto {
-void (*f)(int); /* incompatible function prototype */
void (*f)(int) __bpf_aligned; /* incompatible function prototype */
-void *d;
void *d __bpf_aligned;
-enum core_reloc_primitives_enum c;
enum core_reloc_primitives_enum c __bpf_aligned;
int b;
char a;
};
struct core_reloc_primitives___diff_ptr_type {
-const char * const d; /* different pointee type + modifiers */
const char * const d __bpf_aligned; /* different pointee type + modifiers */
-char a;
char a __bpf_aligned;
int b;
enum core_reloc_primitives_enum c;
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_enum {
char a[1];
int b;
int c; /* int instead of enum */
-void *d;
void *d __bpf_aligned;
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_int {
char a[1];
-int *b; /* ptr instead of int */
int *b __bpf_aligned; /* ptr instead of int */
-enum core_reloc_primitives_enum c;
enum core_reloc_primitives_enum c __bpf_aligned;
-void *d;
void *d __bpf_aligned;
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
struct core_reloc_primitives___err_non_ptr {
@@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
int b;
enum core_reloc_primitives_enum c;
int d; /* int instead of ptr */
-int (*f)(const char *);
int (*f)(const char *) __bpf_aligned;
};
/*
@@ -507,7 +512,7 @@ struct core_reloc_mods_output {
};
typedef const int int_t;
-typedef const char *char_ptr_t;
typedef const char *char_ptr_t __bpf_aligned;
typedef const int arr_t[7];
struct core_reloc_mods_substruct {
@@ -523,9 +528,9 @@ typedef struct {
struct core_reloc_mods {
int a;
int_t b;
-char *c;
char *c __bpf_aligned;
char_ptr_t d;
-int e[3];
int e[3] __bpf_aligned;
arr_t f;
struct core_reloc_mods_substruct g;
core_reloc_mods_substruct_t h;
@@ -535,9 +540,9 @@ struct core_reloc_mods {
struct core_reloc_mods___mod_swap {
int b;
int_t a;
-char *d;
char *d __bpf_aligned;
char_ptr_t c;
-int f[3];
int f[3] __bpf_aligned;
arr_t e;
struct {
int y;
@@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
typedef arr2_t arr3_t;
typedef arr3_t arr4_t;
-typedef const char * const volatile fancy_char_ptr_t;
typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
@@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
arr4_t e;
fancy_char_ptr_t d;
fancy_char_ptr_t c;
-int3_t b;
int3_t b __bpf_aligned;
int3_t a;
};
@@ -740,18 +745,18 @@ struct core_reloc_bitfields___bit_sz_change {
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
uint16_t u32; /* 32 -> 16 */
-int64_t s32; /* 32 -> 64 */
int64_t s32 __bpf_aligned; /* 32 -> 64 */
};
/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 3 -> 64 non-bitfield */
uint8_t ub2; /* 20 -> 8 non-bitfield */
-int64_t ub7; /* 7 -> 64 non-bitfield signed */
int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */
-int64_t sb4; /* 4 -> 64 non-bitfield signed */
int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */
-uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */
uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
-uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */
};
struct core_reloc_bitfields___just_big_enough {
......
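The selftest type headers are compiled both for the BPF target (always 64-bit) and for the host; on a 32-bit host, pointers and 64-bit integers are only 4-byte aligned, so without the new __bpf_aligned attribute the two builds could disagree about member offsets and the CO-RE result comparisons would break. The empty preserce_ptr_sz_fn(long) helper appears to exist so the object's BTF keeps a 'long' type around for libbpf's pointer-size detection added earlier in this batch; treat that reading as an assumption. A minimal illustration with a hypothetical struct:

```c
#define __bpf_aligned __attribute__((aligned(8)))

struct ptr_layout {
	char a;			/* offset 0 in both builds              */
	void *d __bpf_aligned;	/* offset 8 in both builds; without the
				 * attribute a 32-bit host would place it
				 * at offset 4, diverging from the 64-bit
				 * BPF-side layout recorded in BTF       */
};
```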
@@ -54,6 +54,7 @@ SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr;
int good_call_rv = 0;
int bad_call_rv = 0;
@@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops)
int v = 0;
int op;
/* Test reading fields in bpf_sock_ops using single register */
asm volatile (
"%[reuse] = *(u32 *)(%[reuse] +96)"
: [reuse] "+r"(reuse)
:);
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
: [op] "+r"(op)
: [skops] "r"(skops)
:);
asm volatile (
"r9 = %[skops];\n"
"r8 = *(u32 *)(r9 +164);\n"
"*(u32 *)(r9 +164) = r8;\n"
:: [skops] "r"(skops)
: "r9", "r8");
asm volatile (
"r1 = %[skops];\n"
"r1 = *(u64 *)(r1 +184);\n"
"if r1 == 0 goto +1;\n"
"r1 = *(u32 *)(r1 +4);\n"
:: [skops] "r"(skops):"r1");
asm volatile (
"r9 = %[skops];\n"
"r9 = *(u64 *)(r9 +184);\n"
"if r9 == 0 goto +1;\n"
"r9 = *(u32 *)(r9 +4);\n"
:: [skops] "r"(skops):"r9");
asm volatile (
"r1 = %[skops];\n"
"r2 = *(u64 *)(r1 +184);\n"
"if r2 == 0 goto +1;\n"
"r2 = *(u32 *)(r2 +4);\n"
:: [skops] "r"(skops):"r1", "r2");
op = (int) skops->op;
update_event_map(op);
......
@@ -15,9 +15,9 @@ int test_pid = 0;
bool capture = false;
/* .bss */
-long payload1_len1 = 0;
-long payload1_len2 = 0;
-long total1 = 0;
__u64 payload1_len1 = 0;
__u64 payload1_len2 = 0;
__u64 total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
/* .data */
......
@@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
info_garbage.garbage = 0;
err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
if (CHECK(err || info_len != sizeof(*info),
-"err:%d errno:%d info_len:%u sizeof(*info):%lu",
"err:%d errno:%d info_len:%u sizeof(*info):%zu",
err, errno, info_len, sizeof(*info))) {
err = -1;
goto done;
@@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
if (CHECK(err || !info.id || info_len != sizeof(info) ||
info.btf_size != raw_btf_size ||
(ret = memcmp(raw_btf, user_btf, expected_nbytes)),
-"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
"err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
err, errno, info.id, info_len, sizeof(info),
raw_btf_size, info.btf_size, expected_nbytes, ret)) {
err = -1;
@@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
nexpected_line = snprintf(expected_line, line_size,
"%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
-"{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
"%u,0x%x,[[%d,%d],[%d,%d]]}\n",
percpu_map ? "\tcpu" : "",
percpu_map ? cpu : next_key,
@@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
v->unused_bits2a,
v->bits28,
v->unused_bits2b,
-v->ui64,
(__u64)v->ui64,
v->ui8a[0], v->ui8a[1],
v->ui8a[2], v->ui8a[3],
v->ui8a[4], v->ui8a[5],
......
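Most of the selftest changes in this batch boil down to the same two portability rules seen above: size_t values are printed with %zu (on a 32-bit host size_t is unsigned int, so %lu trips -Wformat), and __u64 values always take the ll length modifier. A tiny standalone illustration:

```c
#include <stdio.h>
#include <linux/types.h>

int main(void)
{
	__u64 v = 0x1122334455667788ULL;

	/* %zu matches size_t on 32- and 64-bit targets alike,
	 * and __u64 is always printed with %llx/%llu
	 */
	printf("sizeof(v)=%zu v=%llx\n", sizeof(v), v);
	return 0;
}
```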
@@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
static inline void *u64_to_ptr(__u64 ptr)
{
return (void *) (unsigned long) ptr;
}
int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
int compare_map_keys(int map1_fd, int map2_fd);
int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
......