Commit aa93466b authored by David S. Miller

[TCP]: Eliminate redundant computations in tcp_write_xmit().

tcp_snd_test() is run for every packet output by a single
call to tcp_write_xmit(), but this is not necessary.

For one, the congestion window space only needs to be
calculated once and can then be used throughout the
duration of the loop.

This cleanup also makes experimenting with different TSO
packetization schemes much easier.

Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 7f4dd0a9
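
To make the shape of the change easier to see before reading the diff, here is a minimal, self-contained user-space sketch of the idea: compute the congestion-window quota once, then consume it packet by packet inside the loop, instead of re-running the full send test (including the congestion-window check) on every iteration. All names below (struct packet, send_queue(), cwnd_quota, segs) are illustrative only and are not the kernel's code or API.

/* Sketch only: the quota is computed once and consumed per packet. */
#include <stdio.h>

struct packet {
        int segs;               /* segments this packet occupies (its TSO count) */
        struct packet *next;    /* next packet in the send queue                 */
};

/* Returns how many packets fit into the quota.  Per-packet checks such as
 * Nagle and the send window would still run inside the loop; only the
 * congestion-window budget is hoisted out of the loop condition. */
static int send_queue(struct packet *head, int cwnd_quota)
{
        int sent = 0;

        while (head && cwnd_quota >= head->segs) {
                cwnd_quota -= head->segs;   /* consume quota for this packet */
                sent++;
                head = head->next;          /* advance the send head         */
        }
        return sent;
}

int main(void)
{
        struct packet c = { 2, NULL };
        struct packet b = { 3, &c };
        struct packet a = { 1, &b };

        /* With a quota of 4 segments, only the first two packets fit. */
        printf("sent %d packets\n", send_queue(&a, 4));
        return 0;
}
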
@@ -887,6 +887,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
+       unsigned int tso_segs, cwnd_quota;
        int sent_pkts;

        /* If we are closed, the bytes will have to remain here.
@@ -896,19 +897,31 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
        if (unlikely(sk->sk_state == TCP_CLOSE))
                return 0;

+       skb = sk->sk_send_head;
+       if (unlikely(!skb))
+               return 0;
+
+       tso_segs = tcp_init_tso_segs(sk, skb);
+       cwnd_quota = tcp_cwnd_test(tp, skb);
        sent_pkts = 0;
-       while ((skb = sk->sk_send_head) &&
-              tcp_snd_test(sk, skb, mss_now,
-                           tcp_skb_is_last(sk, skb) ? nonagle :
-                                                      TCP_NAGLE_PUSH)) {
-               if (skb->len > mss_now) {
-                       if (tcp_fragment(sk, skb, mss_now))
+
+       while (cwnd_quota >= tso_segs) {
+               if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+                                            (tcp_skb_is_last(sk, skb) ?
+                                             nonagle : TCP_NAGLE_PUSH))))
+                       break;
+
+               if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                       break;
+
+               if (unlikely(skb->len > mss_now)) {
+                       if (unlikely(tcp_fragment(sk, skb, mss_now)))
                                break;
                }

                TCP_SKB_CB(skb)->when = tcp_time_stamp;
                tcp_tso_set_push(skb);
-               if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
+               if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
                        break;

                /* Advance the send_head.  This one is sent out.
@@ -917,10 +930,19 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                update_send_head(sk, tp, skb);

                tcp_minshall_update(tp, mss_now, skb);
-               sent_pkts = 1;
+               sent_pkts++;
+
+               /* Do not optimize this to use tso_segs. If we chopped up
+                * the packet above, tso_segs will no longer be valid.
+                */
+               cwnd_quota -= tcp_skb_pcount(skb);
+               skb = sk->sk_send_head;
+               if (!skb)
+                       break;
+               tso_segs = tcp_init_tso_segs(sk, skb);
        }

-       if (sent_pkts) {
+       if (likely(sent_pkts)) {
                tcp_cwnd_validate(sk, tp);
                return 0;
        }