Commit 63d9b80d authored by Hao Luo, committed by Alexei Starovoitov

bpf: Introduce bpf_this_cpu_ptr()

Add bpf_this_cpu_ptr() to help access a percpu variable on the current
cpu. This helper always returns a valid pointer, so there is no need to
check the returned value for NULL. Also note that all programs run with
preemption disabled, which means that the returned pointer is stable for
the entire execution of the program.
Signed-off-by: Hao Luo <haoluo@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200929235049.2533242-6-haoluo@google.com
Parent eaa6bcb7
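For illustration, here is a minimal sketch of how a BPF program might use the new helper. It is modeled loosely on the selftests in this series and assumes a libbpf setup with vmlinux.h plus the typed-ksym support added by earlier patches; the per-cpu variable and the attach point are only examples.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Per-cpu kernel variable, resolved as a BTF-typed ksym. bpf_prog_active
 * is used here only as an example of an existing per-cpu int.
 */
extern const int bpf_prog_active __ksym;

__u64 this_cpu_prog_active_addr;

SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
	/* The returned pointer is never NULL and, because BPF programs run
	 * with preemption disabled, it stays valid for the whole run of the
	 * program, so it can be dereferenced without a NULL check.
	 */
	const int *active = bpf_this_cpu_ptr(&bpf_prog_active);

	this_cpu_prog_active_addr = (__u64)active;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```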
@@ -309,6 +309,7 @@ enum bpf_return_type {
RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -1832,6 +1833,7 @@ extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -3703,6 +3703,18 @@ union bpf_attr {
* Return
* A pointer pointing to the kernel percpu variable on *cpu*, or
* NULL, if *cpu* is invalid.
*
* void *bpf_this_cpu_ptr(const void *percpu_ptr)
* Description
* Take a pointer to a percpu ksym, *percpu_ptr*, and return a
* pointer to the percpu kernel variable on this cpu. See the
* description of 'ksym' in **bpf_per_cpu_ptr**\ ().
*
* bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
* the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
* never return NULL.
* Return
* A pointer pointing to the kernel percpu variable on this cpu.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3859,6 +3871,7 @@ union bpf_attr {
FN(skb_cgroup_classid), \
FN(redirect_neigh), \
FN(bpf_per_cpu_ptr), \
FN(bpf_this_cpu_ptr), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
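To contrast the two helpers documented above: bpf_per_cpu_ptr() can return NULL when *cpu* is invalid, so its result must be checked before use, for example when walking per-cpu values across CPUs. A hedged sketch continuing the program above (MAX_CPUS and the ksym name are illustrative assumptions):

```c
#define MAX_CPUS 128	/* illustrative bound; real code would size this properly */

extern const int bpf_prog_active __ksym;

static __always_inline int sum_prog_active(void)
{
	const int *p;
	__u32 cpu;
	int sum = 0;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		p = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
		/* NULL for an invalid cpu; the verifier requires this check
		 * before *p can be read.
		 */
		if (!p)
			break;
		sum += *p;
	}
	return sum;
}
```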
@@ -639,6 +639,18 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}
const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
.func = bpf_this_cpu_ptr,
.gpl_only = false,
.ret_type = RET_PTR_TO_MEM_OR_BTF_ID,
.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
@@ -707,6 +719,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_jiffies64_proto;
case BPF_FUNC_bpf_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
case BPF_FUNC_bpf_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
break;
}
@@ -5128,7 +5128,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
regs[BPF_REG_0].mem_size = meta.mem_size;
} else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL) {
} else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
const struct btf_type *t;
mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -5146,10 +5147,14 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
tname, PTR_ERR(ret));
return -EINVAL;
}
regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
regs[BPF_REG_0].type =
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
regs[BPF_REG_0].mem_size = tsize;
} else {
regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
regs[BPF_REG_0].type =
fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
regs[BPF_REG_0].btf_id = meta.ret_btf_id;
}
} else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
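The verifier change above is what turns the "never NULL" property into something programs can rely on: for RET_PTR_TO_MEM_OR_BTF_ID the return register is typed PTR_TO_MEM or PTR_TO_BTF_ID rather than the _OR_NULL variants. A hypothetical fragment showing the difference as the verifier sees it (percpu_var stands in for any per-cpu ksym of int type):

```c
const int *p, *q;
int v1, v2;

p = bpf_per_cpu_ptr(&percpu_var, 0);	/* R0: PTR_TO_MEM_OR_NULL             */
v1 = *p;				/* rejected: possible NULL dereference */

q = bpf_this_cpu_ptr(&percpu_var);	/* R0: PTR_TO_MEM, never NULL          */
v2 = *q;				/* accepted without a NULL check       */
```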
@@ -1329,6 +1329,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_snprintf_btf_proto;
case BPF_FUNC_bpf_per_cpu_ptr:
return &bpf_per_cpu_ptr_proto;
case BPF_FUNC_bpf_this_cpu_ptr:
return &bpf_this_cpu_ptr_proto;
default:
return NULL;
}
@@ -3703,6 +3703,18 @@ union bpf_attr {
* Return
* A pointer pointing to the kernel percpu variable on *cpu*, or
* NULL, if *cpu* is invalid.
*
* void *bpf_this_cpu_ptr(const void *percpu_ptr)
* Description
* Take a pointer to a percpu ksym, *percpu_ptr*, and return a
* pointer to the percpu kernel variable on this cpu. See the
* description of 'ksym' in **bpf_per_cpu_ptr**\ ().
*
* bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
* the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
* never return NULL.
* Return
* A pointer pointing to the kernel percpu variable on this cpu.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3859,6 +3871,7 @@ union bpf_attr {
FN(skb_cgroup_classid), \
FN(redirect_neigh), \
FN(bpf_per_cpu_ptr), \
FN(bpf_this_cpu_ptr), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper