Commit 4e1ec56c authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: add skb_load_bytes_relative helper

This adds a small BPF helper similar to bpf_skb_load_bytes() that
is able to load relative to mac/net header offset from the skb's
linear data. Compared to bpf_skb_load_bytes(), it takes a fifth
argument namely start_header, which is either BPF_HDR_START_MAC
or BPF_HDR_START_NET. This allows for a more flexible alternative
compared to LD_ABS/LD_IND with negative offset. It's enabled for
tc BPF programs as well as sock filter program types where it's
mainly useful in reuseport programs to ease access to lower header
data.

Reference: https://lists.iovisor.org/pipermail/iovisor-dev/2017-March/000698.html
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Parent e0cea7ce
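
For illustration before the diff, here is a minimal usage sketch that is not part of this commit: a socket filter that reads the IPv4 header relative to BPF_HDR_START_NET, which works even when skb->data does not point at the mac header. It assumes a recent libbpf toolchain; the section name, program name and the UDP-only policy are illustrative choices, not anything mandated by the helper.

#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

/* Illustrative socket filter: keep UDP packets, drop everything else.
 * bpf_skb_load_bytes_relative() loads bytes relative to the network
 * header, so the read is independent of where skb->data points.
 */
SEC("socket")
int udp_only(struct __sk_buff *skb)
{
        struct iphdr iph;

        /* On failure the destination buffer is zeroed and a negative
         * error is returned, so the verdict below stays well defined. */
        if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
                                        BPF_HDR_START_NET))
                return 0;

        return iph.protocol == IPPROTO_UDP ? skb->len : 0;
}

char _license[] SEC("license") = "GPL";
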
@@ -1802,6 +1802,30 @@ union bpf_attr {
* Return
* a non-negative value equal to or less than size on success, or
* a negative error in case of failure.
*
* int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
* from the packet associated to *skb*, into the buffer pointed
* by *to*. The difference to **bpf_skb_load_bytes**\ () is that
* a fifth argument *start_header* exists in order to select a
* base offset to start from. *start_header* can be one of:
*
* **BPF_HDR_START_MAC**
* Base offset to load data from is *skb*'s mac header.
* **BPF_HDR_START_NET**
* Base offset to load data from is *skb*'s network header.
*
* In general, "direct packet access" is the preferred method to
* access packet data, however, this helper is in particular useful
* in socket filters where *skb*\ **->data** does not always point
* to the start of the mac header and where "direct packet access"
* is not available.
*
* Return
* 0 on success, or a negative error in case of failure.
*
*/
#define __BPF_FUNC_MAPPER(FN) \
        FN(unspec), \
@@ -1871,7 +1895,8 @@ union bpf_attr {
        FN(bind), \
        FN(xdp_adjust_tail), \
        FN(skb_get_xfrm_state), \
        FN(get_stack), \
        FN(skb_load_bytes_relative),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
@@ -1932,6 +1957,12 @@ enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
};

/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
enum bpf_hdr_start_off {
        BPF_HDR_START_MAC,
        BPF_HDR_START_NET,
};

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
...
@@ -1684,6 +1684,47 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
        .arg4_type      = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
           u32, offset, void *, to, u32, len, u32, start_header)
{
        u8 *ptr;

        if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
                goto err_clear;

        switch (start_header) {
        case BPF_HDR_START_MAC:
                ptr = skb_mac_header(skb) + offset;
                break;
        case BPF_HDR_START_NET:
                ptr = skb_network_header(skb) + offset;
                break;
        default:
                goto err_clear;
        }

        if (likely(ptr >= skb_mac_header(skb) &&
                   ptr + len <= skb_tail_pointer(skb))) {
                memcpy(to, ptr, len);
                return 0;
        }

err_clear:
        memset(to, 0, len);
        return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
        .func           = bpf_skb_load_bytes_relative,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg4_type      = ARG_CONST_SIZE,
        .arg5_type      = ARG_ANYTHING,
};
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
        /* Idea is the following: should the needed direct read/write
@@ -4061,6 +4102,8 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        switch (func_id) {
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        case BPF_FUNC_skb_load_bytes_relative:
                return &bpf_skb_load_bytes_relative_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_cookie_proto;
        case BPF_FUNC_get_socket_uid:
@@ -4078,6 +4121,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_skb_store_bytes_proto;
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        case BPF_FUNC_skb_load_bytes_relative:
                return &bpf_skb_load_bytes_relative_proto;
        case BPF_FUNC_skb_pull_data:
                return &bpf_skb_pull_data_proto;
        case BPF_FUNC_csum_diff:
...
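
As a second sketch (again illustrative, not from the commit), the same helper can be used from a tc classifier with BPF_HDR_START_MAC. Note from the implementation above that whenever the requested bytes are not fully within the linear data between the mac header and the tail pointer, the destination buffer is zeroed and -EFAULT is returned, so callers should always check the return value. The section name, program name and the IPv6-drop policy below are assumptions for the example.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Illustrative tc classifier: read the Ethernet header relative to the
 * mac header start and drop IPv6 frames, passing everything else. */
SEC("classifier")
int drop_ipv6(struct __sk_buff *skb)
{
        struct ethhdr eth;

        if (bpf_skb_load_bytes_relative(skb, 0, &eth, sizeof(eth),
                                        BPF_HDR_START_MAC))
                return TC_ACT_OK;       /* header not readable, let it pass */

        return eth.h_proto == bpf_htons(ETH_P_IPV6) ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
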