Commit c194cf93 authored by Alexander Duyck, committed by David S. Miller

gro: Defer clearing of flush bit in tunnel paths

This patch updates the GRO handlers for GRE, VXLAN, GENEVE, and FOU so that
we do not clear the flush bit until after we have called the next level GRO
handler.  Previously the bit was being cleared before parsing through the
list of frames; this resulted in several paths where the bit either needed
to be reset but wasn't, as in the case of FOU, or was being set when it
should not have been, as in GENEVE.  By deferring the clearing of the bit
until after the next level protocol has been parsed we can avoid any
unnecessary bit twiddling and avoid bugs.
Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 3a8befcd
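For reference, the control-flow change shared by all four handlers can be modeled with a short standalone C sketch (a userspace illustration, not kernel code; tunnel_gro_receive(), inner_gro_receive() and struct gro_ctx are made-up names): the flush flag starts at 1, stays 1 on every early exit, and is cleared only after the next level handler has actually been called.

/*
 * Minimal userspace sketch of the pattern this commit applies.
 * The names below are illustrative, not real kernel APIs.
 */
#include <stdio.h>
#include <stdbool.h>

struct gro_ctx {
	int flush;		/* 1 = do not aggregate, flush this skb */
};

/* Stand-in for ptype->callbacks.gro_receive() / eth_gro_receive(). */
static bool inner_gro_receive(struct gro_ctx *ctx)
{
	(void)ctx;
	return true;		/* pretend the inner protocol parsed fine */
}

static void tunnel_gro_receive(struct gro_ctx *ctx, bool header_ok,
			       bool have_inner_handler)
{
	int flush = 1;		/* pessimistic default, as in the kernel */

	if (!header_ok)
		goto out;	/* malformed tunnel header: flush stays 1 */

	if (!have_inner_handler)
		goto out;	/* no next-level handler: flush stays 1 */

	inner_gro_receive(ctx);
	flush = 0;		/* deferred: cleared only after the inner call */

out:
	ctx->flush |= flush;
}

int main(void)
{
	struct gro_ctx ctx = { .flush = 0 };

	tunnel_gro_receive(&ctx, true, false);
	printf("no inner handler -> flush = %d\n", ctx.flush);	/* prints 1 */

	ctx.flush = 0;
	tunnel_gro_receive(&ctx, true, true);
	printf("inner handler ran -> flush = %d\n", ctx.flush);	/* prints 0 */
	return 0;
}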
@@ -463,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 			goto out;
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -481,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
 	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
-	if (!ptype) {
-		flush = 1;
+	if (!ptype)
 		goto out_unlock;
-	}
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
@@ -591,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -606,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 	}
 
 	pp = eth_gro_receive(head, skb);
+	flush = 0;
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
@@ -319,8 +319,6 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, hdrlen);
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct guehdr *guehdr2;
 
@@ -352,6 +350,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
@@ -175,8 +175,6 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 					     null_compute_pseudo);
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct gre_base_hdr *greh2;
 
@@ -213,6 +211,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();