提交 cee65b79 编写于 作者: L Liu Jian

net: add bpf_is_local_ipaddr bpf helper function

hulk inclusion
category: feature
bugzilla: NA
CVE: N/A

----------------------------------------------------

Some network acceleration solutions, such as sockmap, are valid only for
internal packets of the local host. The bpf_is_local_ipaddr() bpf helper
function is added so that the ebpf program can determine whether a packet
is an internal packet of the local host.
Signed-off-by: Liu Jian <liujian56@huawei.com>
上级 9fb0c016
...@@ -3139,6 +3139,7 @@ CONFIG_DLCI_MAX=8 ...@@ -3139,6 +3139,7 @@ CONFIG_DLCI_MAX=8
CONFIG_USB4_NET=m CONFIG_USB4_NET=m
# CONFIG_NETDEVSIM is not set # CONFIG_NETDEVSIM is not set
CONFIG_NET_FAILOVER=m CONFIG_NET_FAILOVER=m
CONFIG_NET_LOCALIP_LST=m
# CONFIG_ISDN is not set # CONFIG_ISDN is not set
# #
......
...@@ -3216,6 +3216,7 @@ CONFIG_USB4_NET=m ...@@ -3216,6 +3216,7 @@ CONFIG_USB4_NET=m
CONFIG_HYPERV_NET=m CONFIG_HYPERV_NET=m
CONFIG_NETDEVSIM=m CONFIG_NETDEVSIM=m
CONFIG_NET_FAILOVER=m CONFIG_NET_FAILOVER=m
CONFIG_NET_LOCALIP_LST=m
CONFIG_ISDN=y CONFIG_ISDN=y
CONFIG_ISDN_CAPI=y CONFIG_ISDN_CAPI=y
CONFIG_CAPI_TRACE=y CONFIG_CAPI_TRACE=y
......
...@@ -592,4 +592,12 @@ config NET_FAILOVER ...@@ -592,4 +592,12 @@ config NET_FAILOVER
a VM with direct attached VF by failing over to the paravirtual a VM with direct attached VF by failing over to the paravirtual
datapath when the VF is unplugged. datapath when the VF is unplugged.
config NET_LOCALIP_LST
tristate "Collect local ipv4 address"
depends on INET
default n
help
Similar to inet_addr_lst, but only the IP address is recorded;
network namespaces are not taken into account.
endif # NETDEVICES endif # NETDEVICES
...@@ -84,3 +84,4 @@ thunderbolt-net-y += thunderbolt.o ...@@ -84,3 +84,4 @@ thunderbolt-net-y += thunderbolt.o
obj-$(CONFIG_USB4_NET) += thunderbolt-net.o obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
obj-$(CONFIG_NETDEVSIM) += netdevsim/ obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o obj-$(CONFIG_NET_FAILOVER) += net_failover.o
obj-$(CONFIG_NET_LOCALIP_LST) += localip/
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the linux kernel.
#
# Object file lists.
obj-$(CONFIG_NET_LOCALIP_LST) += localip.o
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Huawei Technologies Co., Ltd
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <trace/events/net.h>
/* Hash table sized like the core inet_addr_lst: 2^8 buckets. */
#define IN4_ADDR_HSIZE_SHIFT 8
#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
/* Buckets of localipaddr entries; read under RCU, modified under localip_lock. */
static struct hlist_head localip_lst[IN4_ADDR_HSIZE];
static DEFINE_SPINLOCK(localip_lock);
/* One tracked local IPv4 address; unlinked under localip_lock, freed via kfree_rcu(). */
struct localipaddr {
struct hlist_node node;
struct rcu_head rcu;
__u32 ipaddr;
};
/*
 * Map an IPv4 address to its localip_lst bucket index.
 *
 * The address arrives in network byte order (__be32) while hash_32()
 * takes a plain u32; the __force cast makes the intentional byte-order
 * reinterpretation explicit and keeps sparse quiet.  Bucket distribution
 * is unaffected as long as insert and lookup hash consistently.
 */
static u32 localip_hash(__be32 addr)
{
	return hash_32((__force u32)addr, IN4_ADDR_HSIZE_SHIFT);
}
/*
 * Link @ip into its hash bucket.  Caller must hold localip_lock;
 * lookups traverse the chain under RCU.
 */
static void localip_hash_insert(struct localipaddr *ip)
{
	hlist_add_head_rcu(&ip->node, &localip_lst[localip_hash(ip->ipaddr)]);
}
/*
 * Unlink @ip from its bucket.  Caller must hold localip_lock and free the
 * entry with kfree_rcu() so concurrent RCU readers can finish safely.
 */
static void localip_hash_remove(struct localipaddr *ip)
{
hlist_del_init_rcu(&ip->node);
}
/*
 * Return 1 if @ipaddr is currently recorded as a local address, 0
 * otherwise.  Lockless: the bucket chain is walked under rcu_read_lock().
 */
static int is_local_ipaddr(uint32_t ipaddr)
{
	struct localipaddr *entry;
	int found = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, &localip_lst[localip_hash(ipaddr)], node) {
		if (entry->ipaddr == ipaddr) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
/*
 * inetaddr notifier: mirror IPv4 address add/remove events into the local
 * hash table so is_local_ipaddr() can answer without touching inet state.
 *
 * Loopback addresses are ignored.  On allocation failure the UP event is
 * silently dropped; the page allocator already warns on OOM, so no extra
 * message is printed here (checkpatch flags such messages as redundant).
 * Always returns NOTIFY_DONE so the rest of the chain still runs.
 */
static int localip_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *event_netdev = ifa->ifa_dev->dev;
	struct localipaddr *localip;
	u32 hash;

	if (ipv4_is_loopback(ifa->ifa_local))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		pr_debug("UP, dev:%s, ip:0x%x, mask:0x%x\n", event_netdev->name,
			 ifa->ifa_local, ifa->ifa_mask);
		localip = kzalloc(sizeof(struct localipaddr), GFP_KERNEL);
		if (!localip)
			break;
		localip->ipaddr = ifa->ifa_local;
		spin_lock(&localip_lock);
		localip_hash_insert(localip);
		spin_unlock(&localip_lock);
		break;
	case NETDEV_DOWN:
		pr_debug("DOWN, dev:%s, ip:0x%x, mask:0x%x\n", event_netdev->name,
			 ifa->ifa_local, ifa->ifa_mask);
		hash = localip_hash(ifa->ifa_local);
		spin_lock(&localip_lock);
		/*
		 * Remove only the first match: an address configured on
		 * several interfaces is inserted once per UP event, so one
		 * DOWN event must release exactly one entry.
		 */
		hlist_for_each_entry(localip, &localip_lst[hash], node) {
			if (localip->ipaddr == ifa->ifa_local) {
				localip_hash_remove(localip);
				kfree_rcu(localip, rcu);
				break;
			}
		}
		spin_unlock(&localip_lock);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* Subscribes localip_event() to IPv4 address change notifications. */
static struct notifier_block localip_notifier = {
.notifier_call = localip_event,
};
/*
 * Tracepoint probe for trace_is_local_ipaddr(): bridges the bpf helper in
 * net/core/filter.c to this optional module without a hard symbol
 * dependency.  @data is the unused registration cookie; the answer is
 * written through @ret.
 */
static void is_local_ipaddr_trace(void *data, int *ret, uint32_t ipaddr)
{
*ret = is_local_ipaddr(ipaddr);
}
/*
 * Module init: initialise the hash buckets, register the inetaddr
 * notifier, then attach the tracepoint probe.  If the probe cannot be
 * attached the notifier registration is unwound so the module loads
 * all-or-nothing.  Returns 0 on success or a negative errno.
 */
static int localip_init(void)
{
	int i, err;

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&localip_lst[i]);

	err = register_inetaddr_notifier(&localip_notifier);
	if (err)
		return err;

	err = register_trace_is_local_ipaddr(is_local_ipaddr_trace, NULL);
	if (err) {
		/* Fixed typo in original message ("connet"). */
		pr_err("Failed to connect probe to is_local_ipaddr.\n");
		unregister_inetaddr_notifier(&localip_notifier);
		return err;
	}

	return 0;
}
static void localip_cleanup(void)
{
struct localipaddr *localip;
struct hlist_node *n;
int i;
unregister_trace_is_local_ipaddr(is_local_ipaddr_trace, NULL);
unregister_inetaddr_notifier(&localip_notifier);
spin_lock(&localip_lock);
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
hlist_for_each_entry_safe(localip, n, &localip_lst[i], node) {
pr_debug("cleanup, hash:%i, ip:0x%x\n", i, localip->ipaddr);
localip_hash_remove(localip);
kfree_rcu(localip, rcu);
}
}
spin_unlock(&localip_lock);
synchronize_rcu();
}
module_init(localip_init);
module_exit(localip_cleanup);
MODULE_LICENSE("GPL");
/* modpost warns on modules without a description. */
MODULE_DESCRIPTION("Track local IPv4 addresses for the is_local_ipaddr tracepoint");
...@@ -326,6 +326,10 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit, ...@@ -326,6 +326,10 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
TP_ARGS(ret) TP_ARGS(ret)
); );
DECLARE_TRACE(is_local_ipaddr,
TP_PROTO(int *ret, u32 ipaddr),
TP_ARGS(ret, ipaddr));
#endif /* _TRACE_NET_H */ #endif /* _TRACE_NET_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
...@@ -3872,6 +3872,12 @@ union bpf_attr { ...@@ -3872,6 +3872,12 @@ union bpf_attr {
* check src_cpu whether share cache with dst_cpu. * check src_cpu whether share cache with dst_cpu.
* Return * Return
* yes 1, no 0. * yes 1, no 0.
*
* long bpf_is_local_ipaddr(u32 ipaddr)
* Description
* Check whether *ipaddr* is an address configured on the local host.
* Return
* 1 if it is a local address, 0 otherwise.
*/ */
#define __BPF_FUNC_MAPPER(FN) \ #define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \ FN(unspec), \
...@@ -4044,6 +4050,7 @@ union bpf_attr { ...@@ -4044,6 +4050,7 @@ union bpf_attr {
FN(sched_entity_to_tg), \ FN(sched_entity_to_tg), \
FN(cpumask_op), \ FN(cpumask_op), \
FN(cpus_share_cache), \ FN(cpus_share_cache), \
FN(is_local_ipaddr), \
/* */ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
...@@ -77,6 +77,7 @@ ...@@ -77,6 +77,7 @@
#include <net/transp_v6.h> #include <net/transp_v6.h>
#include <linux/btf_ids.h> #include <linux/btf_ids.h>
#include <net/tls.h> #include <net/tls.h>
#include <trace/events/net.h>
static const struct bpf_func_proto * static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id); bpf_sk_base_func_proto(enum bpf_func_id func_id);
...@@ -5084,6 +5085,21 @@ static const struct bpf_func_proto bpf_sk_original_addr_proto = { ...@@ -5084,6 +5085,21 @@ static const struct bpf_func_proto bpf_sk_original_addr_proto = {
.arg4_type = ARG_CONST_SIZE, .arg4_type = ARG_CONST_SIZE,
}; };
/*
 * bpf helper backing BPF_FUNC_is_local_ipaddr: returns 1 when @ipaddr is
 * a local address, 0 otherwise.  The check is delegated through the
 * is_local_ipaddr tracepoint so the optional localip module supplies the
 * answer; with no probe registered the default 0 ("not local") is kept.
 */
BPF_CALL_1(bpf_is_local_ipaddr, uint32_t, ipaddr)
{
int ret = 0;
trace_is_local_ipaddr(&ret, ipaddr);
return ret;
}
/* Helper proto: callable from non-GPL programs; any scalar argument accepted. */
static const struct bpf_func_proto bpf_is_local_ipaddr_proto = {
.func = bpf_is_local_ipaddr,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
};
BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen) int, level, int, optname, char *, optval, int, optlen)
{ {
...@@ -7398,6 +7414,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -7398,6 +7414,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_tcp_sock: case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto; return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */ #endif /* CONFIG_INET */
case BPF_FUNC_is_local_ipaddr:
return &bpf_is_local_ipaddr_proto;
default: default:
return bpf_sk_base_func_proto(func_id); return bpf_sk_base_func_proto(func_id);
} }
......
...@@ -60,3 +60,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb); ...@@ -60,3 +60,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll); EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset); EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
EXPORT_TRACEPOINT_SYMBOL_GPL(is_local_ipaddr);
...@@ -3872,6 +3872,12 @@ union bpf_attr { ...@@ -3872,6 +3872,12 @@ union bpf_attr {
* check src_cpu whether share cache with dst_cpu. * check src_cpu whether share cache with dst_cpu.
* Return * Return
* true yes, false no. * true yes, false no.
*
* long bpf_is_local_ipaddr(u32 ipaddr)
* Description
* Check whether *ipaddr* is an address configured on the local host.
* Return
* 1 if it is a local address, 0 otherwise.
*/ */
#define __BPF_FUNC_MAPPER(FN) \ #define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \ FN(unspec), \
...@@ -4044,6 +4050,7 @@ union bpf_attr { ...@@ -4044,6 +4050,7 @@ union bpf_attr {
FN(sched_entity_to_tg), \ FN(sched_entity_to_tg), \
FN(cpumask_op), \ FN(cpumask_op), \
FN(cpus_share_cache), \ FN(cpus_share_cache), \
FN(is_local_ipaddr), \
/* */ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册