Commit 10aceb62 authored by Dave Marchevsky, committed by Alexei Starovoitov

bpf: Add bpf_trace_vprintk helper

This helper is meant to be "bpf_trace_printk, but with proper vararg
support". Follow bpf_snprintf's example and take a u64 pseudo-vararg
array. Write to /sys/kernel/debug/tracing/trace_pipe using the same
mechanism as bpf_trace_printk. The functionality of this helper was
requested in the libbpf issue tracker [0].

[0] Closes: https://github.com/libbpf/libbpf/issues/315

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210917182911.2426606-4-davemarchevsky@fb.com
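
As a rough illustration of the intended usage (not part of this commit; the section name, format string, and values below are made up), a BPF program packs one u64 per format argument into an array and passes the array together with its byte length:

```c
// Hypothetical caller sketch, assuming a libbpf setup where the
// bpf_trace_vprintk() helper definition generated from this patch is available.
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int handle_enter(void *ctx)
{
	static const char fmt[] = "vprintk: %d %d %d %d\n";
	/* one u64 slot per conversion in fmt, as with bpf_seq_printf() */
	__u64 args[] = { 1, 2, 3, 4 };

	/* output shows up in /sys/kernel/debug/tracing/trace_pipe */
	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}
```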
Parent 84b4c529
@@ -1088,6 +1088,7 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
......
@@ -4898,6 +4898,16 @@ union bpf_attr {
* **-EINVAL** if *flags* is not zero.
*
* **-ENOENT** if architecture does not support branch records.
*
* long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
* Description
* Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
* to format and can handle more format args as a result.
*
* Arguments are to be used as in **bpf_seq_printf**\ () helper.
* Return
* The number of bytes written to the buffer, or a negative error
* in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5077,6 +5087,7 @@ union bpf_attr {
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
FN(trace_vprintk), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
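
The helper documentation added above defers to **bpf_seq_printf**\ () for the argument convention: each conversion consumes one u64 slot, and pointer-sized arguments (for instance the string address consumed by %s) are stored in their slot as the raw pointer value. A minimal sketch of that packing, assuming the same program scaffolding (includes, SEC, license) as the example under the commit message; the function and variable names are invented:

```c
/* Illustrative only; not part of this commit. */
static const char comm_hint[] = "nanosleep";

static __always_inline void report(int cpu, unsigned long ts)
{
	static const char fmt[] = "cpu=%d ts=%lu comm=%s\n";
	/* one u64 slot per conversion; the %s slot carries the string's address */
	__u64 args[] = { (__u64)cpu, (__u64)ts, (__u64)comm_hint };

	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
}
```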
@@ -2357,6 +2357,11 @@ const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
	return NULL;
}

const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
......
@@ -1435,6 +1435,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
		return &bpf_snprintf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return NULL;
	}
......
@@ -398,7 +398,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
@@ -410,10 +410,58 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
@@ -1160,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id);
	}
......
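
For reference, the helper body added above derives everything from *data_len*: it must be a multiple of 8, may describe at most MAX_BPRINTF_VARARGS u64 slots (12 in kernel/bpf/helpers.c at this point), and *data* may only be NULL when *data_len* is 0; the argument count is then simply data_len / 8. A stand-alone restatement of that check (not kernel code, names invented):

```c
#define MAX_BPRINTF_VARARGS	12	/* mirrors kernel/bpf/helpers.c */

/* Returns the number of u64 varargs described by data/data_len, or -1 for
 * inputs that bpf_trace_vprintk() would reject with -EINVAL.
 */
static int vprintk_num_args(const void *data, unsigned int data_len)
{
	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -1;
	return data_len / 8;
}
```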
@@ -4898,6 +4898,16 @@ union bpf_attr {
* **-EINVAL** if *flags* is not zero.
*
* **-ENOENT** if architecture does not support branch records.
*
* long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
* Description
* Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
* to format and can handle more format args as a result.
*
* Arguments are to be used as in **bpf_seq_printf**\ () helper.
* Return
* The number of bytes written to the buffer, or a negative error
* in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5077,6 +5087,7 @@ union bpf_attr {
FN(get_attach_cookie), \
FN(task_pt_regs), \
FN(get_branch_snapshot), \
FN(trace_vprintk), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......