Commit c8123ead authored by Nitin Hande, committed by Daniel Borkmann

bpf: Extend the sk_lookup() helper to XDP hookpoint.

This patch proposes to extend the sk_lookup() BPF API to the
XDP hookpoint. The sk_lookup() helper supports a lookup on an
incoming packet to find the corresponding socket that will
receive this packet. Current support for this BPF API is at
the tc hookpoint; this patch extends it to the XDP hookpoint.
An XDP program can map the incoming packet to the 5-tuple
parameter and invoke the API to find the corresponding socket
structure (a sketch of such a program follows below).
Signed-off-by: Nitin Hande <Nitin.Hande@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Parent bf598a8f
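For illustration, here is a minimal sketch of the usage the commit message describes: an XDP program that maps an incoming IPv4/TCP packet onto the 5-tuple and looks up the owning socket. This sketch is not part of the patch; it assumes clang's BPF target and libbpf's bpf_helpers.h, and the program name and its drop-if-no-socket policy are hypothetical.

/*
 * Hypothetical sketch, not part of the patch: use the new XDP-side
 * bpf_sk_lookup_tcp()/bpf_sk_release() helpers from an XDP program.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_tcp_sk_lookup(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct bpf_sock_tuple tuple = {};
	struct ethhdr *eth = data;
	struct bpf_sock *sk;
	struct tcphdr *tcph;
	struct iphdr *iph;

	/* Bounds-check every header before reading it; the verifier
	 * rejects any packet access not proven to be in range.
	 */
	if ((void *)(eth + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP)
		return XDP_PASS;

	tcph = (void *)(iph + 1);	/* assumes no IP options */
	if ((void *)(tcph + 1) > data_end)
		return XDP_PASS;

	/* Map the incoming packet onto the 5-tuple parameter. */
	tuple.ipv4.saddr = iph->saddr;
	tuple.ipv4.daddr = iph->daddr;
	tuple.ipv4.sport = tcph->source;
	tuple.ipv4.dport = tcph->dest;

	/* netns_id 0 keeps the lookup in the device's own netns and
	 * flags must be 0, per __bpf_sk_lookup() in the patch below.
	 */
	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (!sk)
		return XDP_DROP;	/* hypothetical policy: no local socket, drop */

	bpf_sk_release(sk);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

The bpf_sk_release() call is mandatory once the lookup succeeds: the verifier tracks the socket reference returned by the helper and refuses to load a program that can exit while still holding it.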
@@ -2201,6 +2201,8 @@ union bpf_attr {
  *		**CONFIG_NET** configuration option.
  *	Return
  *		Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *		For sockets with reuseport option, *struct bpf_sock*
+ *		return is from reuse->socks[] using hash of the packet.
  *
  * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
  *	Description
@@ -2233,6 +2235,8 @@ union bpf_attr {
  *		**CONFIG_NET** configuration option.
  *	Return
  *		Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *		For sockets with reuseport option, *struct bpf_sock*
+ *		return is from reuse->socks[] using hash of the packet.
  *
  * int bpf_sk_release(struct bpf_sock *sk)
  *	Description
...
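The reuseport note documented above is observable from a program: repeated lookups of the same tuple land on the same reuse->socks[] member, because selection uses the packet hash. A hedged fragment showing the UDP variant (the wrapper name is hypothetical, and it assumes the same headers as the sketch further up):

/* Hypothetical wrapper: returns 1 if some local UDP socket would
 * receive this tuple. netns_id 0 means the caller's netns in this
 * patch; a nonzero value selects a peer namespace by id.
 */
static __always_inline int udp_sock_present(struct xdp_md *ctx,
					    struct bpf_sock_tuple *tuple,
					    __u32 netns_id)
{
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_udp(ctx, tuple, sizeof(tuple->ipv4), netns_id, 0);
	if (!sk)
		return 0;
	/* For a reuseport group, sk came from reuse->socks[] chosen by
	 * the packet hash, so equal tuples map to the same member.
	 */
	bpf_sk_release(sk);
	return 1;
}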
@@ -4845,38 +4845,32 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
 #ifdef CONFIG_INET
 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
-			      struct sk_buff *skb, u8 family, u8 proto)
+			      int dif, int sdif, u8 family, u8 proto)
 {
 	bool refcounted = false;
 	struct sock *sk = NULL;
-	int dif = 0;
-
-	if (skb->dev)
-		dif = skb->dev->ifindex;
 
 	if (family == AF_INET) {
 		__be32 src4 = tuple->ipv4.saddr;
 		__be32 dst4 = tuple->ipv4.daddr;
-		int sdif = inet_sdif(skb);
 
 		if (proto == IPPROTO_TCP)
-			sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+			sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
 					   src4, tuple->ipv4.sport,
 					   dst4, tuple->ipv4.dport,
 					   dif, sdif, &refcounted);
 		else
 			sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
 					       dst4, tuple->ipv4.dport,
-					       dif, sdif, &udp_table, skb);
+					       dif, sdif, &udp_table, NULL);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
 		struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
 		u16 hnum = ntohs(tuple->ipv6.dport);
-		int sdif = inet6_sdif(skb);
 
 		if (proto == IPPROTO_TCP)
-			sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+			sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
 					    src6, tuple->ipv6.sport,
 					    dst6, hnum,
 					    dif, sdif, &refcounted);
@@ -4885,7 +4879,7 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
 					     src6, tuple->ipv6.sport,
 					     dst6, hnum,
 					     dif, sdif,
-					     &udp_table, skb);
+					     &udp_table, NULL);
 #endif
 	}
@@ -4902,31 +4896,33 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
  * callers to satisfy BPF_CALL declarations.
  */
 static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
-	      u8 proto, u64 netns_id, u64 flags)
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+		u64 flags)
 {
-	struct net *caller_net;
 	struct sock *sk = NULL;
 	u8 family = AF_UNSPEC;
 	struct net *net;
+	int sdif;
 
 	family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
 	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
 		goto out;
 
-	if (skb->dev)
-		caller_net = dev_net(skb->dev);
+	if (family == AF_INET)
+		sdif = inet_sdif(skb);
 	else
-		caller_net = sock_net(skb->sk);
+		sdif = inet6_sdif(skb);
 
 	if (netns_id) {
 		net = get_net_ns_by_id(caller_net, netns_id);
 		if (unlikely(!net))
 			goto out;
-		sk = sk_lookup(net, tuple, skb, family, proto);
+		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
 		put_net(net);
 	} else {
 		net = caller_net;
-		sk = sk_lookup(net, tuple, skb, family, proto);
+		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
 	}
 
 	if (sk)
@@ -4935,6 +4931,25 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 	return (unsigned long) sk;
 }
 
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+	      u8 proto, u64 netns_id, u64 flags)
+{
+	struct net *caller_net;
+	int ifindex;
+
+	if (skb->dev) {
+		caller_net = dev_net(skb->dev);
+		ifindex = skb->dev->ifindex;
+	} else {
+		caller_net = sock_net(skb->sk);
+		ifindex = 0;
+	}
+
+	return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
+			       proto, netns_id, flags);
+}
+
 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
@@ -4984,6 +4999,50 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
 	.ret_type       = RET_INTEGER,
 	.arg1_type      = ARG_PTR_TO_SOCKET,
 };
+
+BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+	struct net *caller_net = dev_net(ctx->rxq->dev);
+	int ifindex = ctx->rxq->dev->ifindex;
+
+	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+			       IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+	.func           = bpf_xdp_sk_lookup_udp,
+	.gpl_only       = false,
+	.pkt_access     = true,
+	.ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_PTR_TO_MEM,
+	.arg3_type      = ARG_CONST_SIZE,
+	.arg4_type      = ARG_ANYTHING,
+	.arg5_type      = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+	struct net *caller_net = dev_net(ctx->rxq->dev);
+	int ifindex = ctx->rxq->dev->ifindex;
+
+	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+			       IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+	.func           = bpf_xdp_sk_lookup_tcp,
+	.gpl_only       = false,
+	.pkt_access     = true,
+	.ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_PTR_TO_MEM,
+	.arg3_type      = ARG_CONST_SIZE,
+	.arg4_type      = ARG_ANYTHING,
+	.arg5_type      = ARG_ANYTHING,
+};
+
 #endif /* CONFIG_INET */
 
 bool bpf_helper_changes_pkt_data(void *func)
@@ -5234,6 +5293,14 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_adjust_tail_proto;
 	case BPF_FUNC_fib_lookup:
 		return &bpf_xdp_fib_lookup_proto;
+#ifdef CONFIG_INET
+	case BPF_FUNC_sk_lookup_udp:
+		return &bpf_xdp_sk_lookup_udp_proto;
+	case BPF_FUNC_sk_lookup_tcp:
+		return &bpf_xdp_sk_lookup_tcp_proto;
+	case BPF_FUNC_sk_release:
+		return &bpf_sk_release_proto;
+#endif
 	default:
 		return bpf_base_func_proto(func_id);
 	}
...