Commit 0e3e466b authored by David S. Miller

Merge branch 'udp-gso-cleanups'

Alexander Duyck says:

====================
UDP GSO Segmentation clean-ups

This patch set addresses a number of issues I found while sorting out
enabling UDP GSO segmentation support for ixgbe/ixgbevf. Specifically, there
were a number of checksum-related issues that caused either minor
irregularities or kernel panics when the offload request was allowed to
traverse between namespaces.

With this set applied I was able to get UDP GSO traffic to pass over vxlan
tunnels in both offloaded and non-offloaded modes for ixgbe and ixgbevf.

I submitted the driver specific patches earlier as an RFC:
https://patchwork.ozlabs.org/project/netdev/list/?series=42477&archive=both&state=*

v2: Updated patches based on feedback from Eric Dumazet
    Split first patch into several patches based on feedback from Eric
v3: Drop patch that was calling pskb_may_pull as it was redundant.
    Added code to use CSUM_MANGLED_0 when the UDP checksum folds to zero (illustrated below)
    Drop patch adding NETIF_F_GSO_UDP_L4 to list of GSO software offloads
    Added Acked-by for patches reviewed by Willem and not changed
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
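
As background for the CSUM_MANGLED_0 item in the v3 notes above, here is a minimal user-space sketch (not kernel code; csum_fold here is a local stand-in, and the values are illustrative) of the RFC 768 rule the new segmentation path follows: a UDP checksum that folds to zero must be transmitted as 0xffff, because a zero checksum field on the wire means "no checksum computed".

#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 ((uint16_t)0xffff)

/* Fold a 32-bit one's-complement sum into 16 bits and invert it. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t sum = 0xffff;          /* illustrative sum that folds to 0 */
	uint16_t check = csum_fold(sum);

	/* Mirrors the "gso_make_checksum(seg, ~check) ?: CSUM_MANGLED_0"
	 * idiom in the diff below: never emit a zero UDP checksum.
	 */
	if (check == 0)
		check = CSUM_MANGLED_0;

	printf("transmitted checksum: 0x%04x\n", check);
	return 0;
}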
@@ -175,8 +175,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
-				  netdev_features_t features,
-				  unsigned int mss, __sum16 check);
+				  netdev_features_t features);
 
 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
 {
...
@@ -793,6 +793,8 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
+							 cork->gso_size);
 		goto csum_partial;
 	}
...
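For context on the gso_segs computation above: cork->gso_size is supplied by user space. Below is a hedged sketch assuming the UDP_SEGMENT socket option (available since Linux 4.18); the function name send_gso_burst is illustrative and not part of this series.

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from <linux/udp.h> */
#endif

/* Send one large buffer; the stack splits it into gso_size-byte datagrams,
 * which is what populates cork->gso_size in udp_send_skb() above.
 */
int send_gso_burst(int fd, const struct sockaddr_in *dst,
		   const void *buf, size_t len, int gso_size)
{
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
		       &gso_size, sizeof(gso_size)) < 0)
		return -1;

	if (sendto(fd, buf, len, 0,
		   (const struct sockaddr *)dst, sizeof(*dst)) < 0)
		return -1;

	return 0;
}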
@@ -188,66 +188,92 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
-				  netdev_features_t features,
-				  unsigned int mss, __sum16 check)
+				  netdev_features_t features)
 {
 	struct sock *sk = gso_skb->sk;
 	unsigned int sum_truesize = 0;
 	struct sk_buff *segs, *seg;
-	unsigned int hdrlen;
 	struct udphdr *uh;
+	unsigned int mss;
+	bool copy_dtor;
+	__sum16 check;
+	__be16 newlen;
 
+	mss = skb_shinfo(gso_skb)->gso_size;
 	if (gso_skb->len <= sizeof(*uh) + mss)
 		return ERR_PTR(-EINVAL);
 
-	hdrlen = gso_skb->data - skb_mac_header(gso_skb);
 	skb_pull(gso_skb, sizeof(*uh));
 
 	/* clear destructor to avoid skb_segment assigning it to tail */
-	WARN_ON_ONCE(gso_skb->destructor != sock_wfree);
-	gso_skb->destructor = NULL;
+	copy_dtor = gso_skb->destructor == sock_wfree;
+	if (copy_dtor)
+		gso_skb->destructor = NULL;
 
 	segs = skb_segment(gso_skb, features);
 	if (unlikely(IS_ERR_OR_NULL(segs))) {
-		gso_skb->destructor = sock_wfree;
+		if (copy_dtor)
+			gso_skb->destructor = sock_wfree;
 		return segs;
 	}
 
-	for (seg = segs; seg; seg = seg->next) {
-		uh = udp_hdr(seg);
-		uh->len = htons(seg->len - hdrlen);
-		uh->check = check;
-
-		/* last packet can be partial gso_size */
-		if (!seg->next)
-			csum_replace2(&uh->check, htons(mss),
-				      htons(seg->len - hdrlen - sizeof(*uh)));
-
-		uh->check = ~uh->check;
-		seg->destructor = sock_wfree;
-		seg->sk = sk;
-		sum_truesize += seg->truesize;
+	/* GSO partial and frag_list segmentation only requires splitting
+	 * the frame into an MSS multiple and possibly a remainder, both
+	 * cases return a GSO skb. So update the mss now.
+	 */
+	if (skb_is_gso(segs))
+		mss *= skb_shinfo(segs)->gso_segs;
+
+	seg = segs;
+	uh = udp_hdr(seg);
+
+	/* compute checksum adjustment based on old length versus new */
+	newlen = htons(sizeof(*uh) + mss);
+	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
+
+	for (;;) {
+		if (copy_dtor) {
+			seg->destructor = sock_wfree;
+			seg->sk = sk;
+			sum_truesize += seg->truesize;
+		}
+
+		if (!seg->next)
+			break;
+
+		uh->len = newlen;
+		uh->check = check;
+
+		if (seg->ip_summed == CHECKSUM_PARTIAL)
+			gso_reset_checksum(seg, ~check);
+		else
+			uh->check = gso_make_checksum(seg, ~check) ? :
+				    CSUM_MANGLED_0;
+
+		seg = seg->next;
+		uh = udp_hdr(seg);
 	}
 
-	refcount_add(sum_truesize - gso_skb->truesize, &sk->sk_wmem_alloc);
-
+	/* last packet can be partial gso_size, account for that in checksum */
+	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
+		       seg->data_len);
+	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
+
+	uh->len = newlen;
+	uh->check = check;
+
+	if (seg->ip_summed == CHECKSUM_PARTIAL)
+		gso_reset_checksum(seg, ~check);
+	else
+		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+
+	/* update refcount for the packet */
+	if (copy_dtor)
+		refcount_add(sum_truesize - gso_skb->truesize,
+			     &sk->sk_wmem_alloc);
 	return segs;
 }
 EXPORT_SYMBOL_GPL(__udp_gso_segment);
 
-static struct sk_buff *__udp4_gso_segment(struct sk_buff *gso_skb,
-					  netdev_features_t features)
-{
-	const struct iphdr *iph = ip_hdr(gso_skb);
-	unsigned int mss = skb_shinfo(gso_skb)->gso_size;
-
-	if (!can_checksum_protocol(features, htons(ETH_P_IP)))
-		return ERR_PTR(-EIO);
-
-	return __udp_gso_segment(gso_skb, features, mss,
-				 udp_v4_check(sizeof(struct udphdr) + mss,
-					      iph->saddr, iph->daddr, 0));
-}
-
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
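
A note on the checksum math in the hunk above: instead of recomputing each segment's pseudo-header checksum, the new code re-targets the existing one from the old length to the new length with 16-bit one's-complement arithmetic. The sketch below is a user-space approximation of that adjustment; the helpers mirror the semantics of the kernel's csum16_add()/csum16_sub(), the __be16/__sum16 byte-order handling is ignored, and the numeric values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* One's-complement 16-bit add with end-around carry. */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint32_t res = (uint32_t)csum + addend;

	return (uint16_t)(res + (res >> 16));
}

/* Subtracting x in one's complement is adding ~x. */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

int main(void)
{
	uint16_t check = 0x1c46;	/* illustrative checksum over old length */
	uint16_t old_len = 0x2000;	/* e.g. an 8192-byte superpacket */
	uint16_t new_len = 0x05dc;	/* e.g. a 1500-byte segment */

	/* Same shape as: csum16_add(csum16_sub(uh->check, uh->len), newlen) */
	printf("adjusted checksum: 0x%04x\n",
	       csum16_add(csum16_sub(check, old_len), new_len));
	return 0;
}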
@@ -272,7 +298,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		goto out;
 
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-		return __udp4_gso_segment(skb, features);
+		return __udp_gso_segment(skb, features);
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
...
@@ -17,20 +17,6 @@
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
-static struct sk_buff *__udp6_gso_segment(struct sk_buff *gso_skb,
-					  netdev_features_t features)
-{
-	const struct ipv6hdr *ip6h = ipv6_hdr(gso_skb);
-	unsigned int mss = skb_shinfo(gso_skb)->gso_size;
-
-	if (!can_checksum_protocol(features, htons(ETH_P_IPV6)))
-		return ERR_PTR(-EIO);
-
-	return __udp_gso_segment(gso_skb, features, mss,
-				 udp_v6_check(sizeof(struct udphdr) + mss,
-					      &ip6h->saddr, &ip6h->daddr, 0));
-}
-
 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
 {
...
@@ -63,7 +49,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		goto out;
 
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-		return __udp6_gso_segment(skb, features);
+		return __udp_gso_segment(skb, features);
 
 	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
 	 * do checksum of UDP packets sent as multiple IP fragments.
...