提交 5cf3d461 编写于 作者: D David Held 提交者: David S. Miller

udp: Simplify __udp*_lib_mcast_deliver.

Switch to using sk_nulls_for_each which shortens the code and makes it
easier to update.
Signed-off-by: David Held <drheld@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 3e1c0f0b
...@@ -594,26 +594,6 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, ...@@ -594,26 +594,6 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
return true; return true;
} }
/*
 * Scan the socket hash chain starting at @sk and return the first socket
 * that matches this multicast 4-tuple (local/remote port and address) on
 * interface @dif, or NULL if the rest of the chain holds no match.
 * Caller holds the hash-slot lock.
 */
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_nulls_node *pos;
	/* hash table stores ports in host byte order */
	unsigned short hnum = ntohs(loc_port);

	sk_nulls_for_each_from(sk, pos) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum))
			return sk;
	}

	return NULL;
}
/* /*
* This routine is called by the ICMP module when it gets some * This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should * sort of error condition. If err < 0 then the socket should
...@@ -1667,24 +1647,24 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, ...@@ -1667,24 +1647,24 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udp_table *udptable) struct udp_table *udptable)
{ {
struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); struct hlist_nulls_node *node;
int dif; unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = skb->dev->ifindex;
unsigned int i, count = 0; unsigned int i, count = 0;
spin_lock(&hslot->lock); spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head); sk_nulls_for_each(sk, node, &hslot->head) {
dif = skb->dev->ifindex; if (__udp_is_mcast_sock(net, sk,
sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); uh->dest, daddr,
while (sk) { uh->source, saddr,
stack[count++] = sk; dif, hnum)) {
sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
daddr, uh->source, saddr, dif);
if (unlikely(count == ARRAY_SIZE(stack))) { if (unlikely(count == ARRAY_SIZE(stack))) {
if (!sk)
break;
flush_stack(stack, count, skb, ~0); flush_stack(stack, count, skb, ~0);
count = 0; count = 0;
} }
stack[count++] = sk;
}
} }
/* /*
* before releasing chain lock, we must take a reference on sockets * before releasing chain lock, we must take a reference on sockets
......
...@@ -702,43 +702,26 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ...@@ -702,43 +702,26 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return -1; return -1;
} }
static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr, __be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr, __be16 rmt_port, const struct in6_addr *rmt_addr,
int dif) int dif, unsigned short hnum)
{ {
struct hlist_nulls_node *node;
unsigned short num = ntohs(loc_port);
sk_nulls_for_each_from(sk, node) {
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net)) if (!net_eq(sock_net(sk), net))
continue; return false;
if (udp_sk(sk)->udp_port_hash == num && if (udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family == PF_INET6) { sk->sk_family != PF_INET6 ||
if (inet->inet_dport) { (inet->inet_dport && inet->inet_dport != rmt_port) ||
if (inet->inet_dport != rmt_port) (!ipv6_addr_any(&sk->sk_v6_daddr) &&
continue; !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
} (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
if (!ipv6_addr_any(&sk->sk_v6_daddr) && return false;
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
continue;
}
if (!inet6_mc_check(sk, loc_addr, rmt_addr)) if (!inet6_mc_check(sk, loc_addr, rmt_addr))
continue; return false;
return sk; return true;
}
}
return NULL;
} }
static void flush_stack(struct sock **stack, unsigned int count, static void flush_stack(struct sock **stack, unsigned int count,
...@@ -787,29 +770,28 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, ...@@ -787,29 +770,28 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
{ {
struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb); const struct udphdr *uh = udp_hdr(skb);
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); struct hlist_nulls_node *node;
int dif; unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = inet6_iif(skb);
unsigned int i, count = 0; unsigned int i, count = 0;
spin_lock(&hslot->lock); spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head); sk_nulls_for_each(sk, node, &hslot->head) {
dif = inet6_iif(skb); if (__udp_v6_is_mcast_sock(net, sk,
sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); uh->dest, daddr,
while (sk) { uh->source, saddr,
dif, hnum) &&
/* If zero checksum and no_check is not on for /* If zero checksum and no_check is not on for
* the socket then skip it. * the socket then skip it.
*/ */
if (uh->check || udp_sk(sk)->no_check6_rx) (uh->check || udp_sk(sk)->no_check6_rx)) {
stack[count++] = sk;
sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
uh->source, saddr, dif);
if (unlikely(count == ARRAY_SIZE(stack))) { if (unlikely(count == ARRAY_SIZE(stack))) {
if (!sk)
break;
flush_stack(stack, count, skb, ~0); flush_stack(stack, count, skb, ~0);
count = 0; count = 0;
} }
stack[count++] = sk;
}
} }
/* /*
* before releasing the lock, we must take reference on sockets * before releasing the lock, we must take reference on sockets
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册