提交 34a802a5 编写于 作者: A Alexander Duyck 提交者: David S. Miller

tcp: move stats merge to the end of tcp_try_coalesce

This change cleans up the last bits of tcp_try_coalesce so that we only
need one goto which jumps to the end of the function.  The idea is to make
the code more readable by putting things in a linear order so that we start
execution at the top of the function, and end it at the bottom.

I also made a slight tweak to the code for handling frags when we are a
clone.  Instead of making it an if (clone) loop else nr_frags = 0 I changed
the logic so that if (!clone) we just set the number of frags to 0 which
disables the for loop anyway.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 57b55a7e
...@@ -4548,15 +4548,13 @@ static bool tcp_try_coalesce(struct sock *sk, ...@@ -4548,15 +4548,13 @@ static bool tcp_try_coalesce(struct sock *sk,
int i, delta, len = from->len; int i, delta, len = from->len;
*fragstolen = false; *fragstolen = false;
if (tcp_hdr(from)->fin || skb_cloned(to)) if (tcp_hdr(from)->fin || skb_cloned(to))
return false; return false;
if (len <= skb_tailroom(to)) { if (len <= skb_tailroom(to)) {
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
merge: goto merge;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
return true;
} }
if (skb_has_frag_list(to) || skb_has_frag_list(from)) if (skb_has_frag_list(to) || skb_has_frag_list(from))
...@@ -4581,7 +4579,6 @@ static bool tcp_try_coalesce(struct sock *sk, ...@@ -4581,7 +4579,6 @@ static bool tcp_try_coalesce(struct sock *sk,
skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
page, offset, skb_headlen(from)); page, offset, skb_headlen(from));
*fragstolen = true; *fragstolen = true;
goto copyfrags;
} else { } else {
if (skb_shinfo(to)->nr_frags + if (skb_shinfo(to)->nr_frags +
skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
...@@ -4589,27 +4586,33 @@ static bool tcp_try_coalesce(struct sock *sk, ...@@ -4589,27 +4586,33 @@ static bool tcp_try_coalesce(struct sock *sk,
delta = from->truesize - delta = from->truesize -
SKB_TRUESIZE(skb_end_pointer(from) - from->head); SKB_TRUESIZE(skb_end_pointer(from) - from->head);
copyfrags:
WARN_ON_ONCE(delta < len);
memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
skb_shinfo(from)->frags,
skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
if (skb_cloned(from))
for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
skb_frag_ref(from, i);
else
skb_shinfo(from)->nr_frags = 0;
to->truesize += delta;
atomic_add(delta, &sk->sk_rmem_alloc);
sk_mem_charge(sk, delta);
to->len += len;
to->data_len += len;
goto merge;
} }
return false;
WARN_ON_ONCE(delta < len);
memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
skb_shinfo(from)->frags,
skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
if (!skb_cloned(from))
skb_shinfo(from)->nr_frags = 0;
/* if the skb is cloned this does nothing since we set nr_frags to 0 */
for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
skb_frag_ref(from, i);
to->truesize += delta;
atomic_add(delta, &sk->sk_rmem_alloc);
sk_mem_charge(sk, delta);
to->len += len;
to->data_len += len;
merge:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
return true;
} }
static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册