commit 6e08d5e3 authored by Yuchung Cheng, committed by David S. Miller

tcp: fix false undo corner cases

The undo code assumes that, upon entering loss recovery, TCP
1) always retransmits something, and
2) the retransmission never fails locally (e.g., a qdisc drop),

so undo_marker is set in tcp_enter_recovery() and undo_retrans is
incremented only when tcp_retransmit_skb() is successful.
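
For context, the pre-patch bookkeeping is roughly the following
(paraphrased from the '-' lines of the diff below; a simplified sketch,
not a literal quote of tcp_input.c / tcp_output.c):

    /* tcp_enter_recovery(): arm the undo state */
    tp->undo_marker = tp->snd_una;
    tp->undo_retrans = tp->retrans_out;

    /* tcp_retransmit_skb(): counted only when the retransmit
     * succeeded locally
     */
    if (!err)
            tp->undo_retrans += tcp_skb_pcount(skb);

    /* tcp_check_dsack() / tcp_sacktag_one(): each DSACK covering a
     * retransmitted segment pays one back
     */
    tp->undo_retrans--;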

When either assumption is broken, e.g. because TCP's cwnd is too
small to retransmit anything or the retransmit fails locally, the
next (DUP)ACK incorrectly reverts the cwnd and the congestion state
in tcp_try_undo_dsack() or tcp_may_undo(). Subsequent (DUP)ACKs may
then re-enter the recovery state, so the sender repeatedly enters
and (incorrectly) exits recovery for as long as the retransmits keep
failing locally while (DUP)ACKs keep arriving.
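
The undo predicates that fire in this scenario look roughly like this
(paraphrased from tcp_input.c of this era; only the undo_retrans logic
is shown):

    /* tcp_may_undo(): recovery looks spurious once undo_retrans is
     * back to zero, i.e. every counted retransmission was DSACKed.
     */
    static inline bool tcp_may_undo(const struct tcp_sock *tp)
    {
            return tp->undo_marker &&
                   (!tp->undo_retrans || tcp_packet_delayed(tp));
    }

    /* tcp_try_undo_dsack(): if nothing was ever retransmitted (or the
     * retransmit was dropped by the local qdisc), undo_retrans is
     * still 0 here, so the next DUPACK "proves" a spurious recovery
     * and the cwnd reduction is reverted.
     */
    if (tp->undo_marker && !tp->undo_retrans)
            tcp_undo_cwnd_reduction(sk, false);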

The fix is to initialize undo_retrans to -1 and start counting on
the first retransmission, and to always increment undo_retrans even
when a retransmission fails locally: a segment that never left the
host cannot trigger a DSACK, so counting it keeps undo_retrans above
zero and prevents a false undo of the cwnd reduction.
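
A hypothetical, self-contained demo of the new sentinel semantics
(variable names mirror the kernel's fields, but this is illustration,
not kernel code):

    #include <stdio.h>

    static int undo_retrans = -1;    /* -1: nothing retransmitted yet */

    static void retransmit(int pcount)
    {
            if (undo_retrans < 0)
                    undo_retrans = 0;
            /* Counted even on a local (qdisc) drop: a segment that
             * never left the host can never return as a DSACK, so
             * undo_retrans stays > 0 and a false undo is impossible.
             */
            undo_retrans += pcount;
    }

    static void dsack_arrived(void)
    {
            if (undo_retrans > 0)
                    undo_retrans--;
    }

    static int may_undo(void)
    {
            /* Undo only once every counted retransmission is DSACKed. */
            return undo_retrans == 0;
    }

    int main(void)
    {
            printf("no rtx yet:   may_undo=%d\n", may_undo()); /* 0 */
            retransmit(1);                 /* counted even if dropped */
            printf("after rtx:    may_undo=%d\n", may_undo()); /* 0 */
            dsack_arrived();
            printf("after DSACK:  may_undo=%d\n", may_undo()); /* 1 */
            return 0;
    }
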
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e326f2f1
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	}
 
 	/* D-SACK for already forgotten data... Do dumb counting. */
-	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
 	    !after(end_seq_0, prior_snd_una) &&
 	    after(end_seq_0, tp->undo_marker))
 		tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
 		/* Account D-SACK for retransmitted packet. */
 		if (dup_sack && (sacked & TCPCB_RETRANS)) {
-			if (tp->undo_marker && tp->undo_retrans &&
+			if (tp->undo_marker && tp->undo_retrans > 0 &&
 			    after(end_seq, tp->undo_marker))
 				tp->undo_retrans--;
 			if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
 	tp->lost_out = 0;
 
 	tp->undo_marker = 0;
-	tp->undo_retrans = 0;
+	tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
 	tp->prior_ssthresh = 0;
 	tp->undo_marker = tp->snd_una;
-	tp->undo_retrans = tp->retrans_out;
+	tp->undo_retrans = tp->retrans_out ? : -1;
 
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
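
Note: `tp->retrans_out ? : -1` uses the GNU C extension that omits the
middle operand of the conditional operator; `a ? : b` evaluates to `a`
if `a` is nonzero, else `b`. The equivalent plain C is:

    tp->undo_retrans = tp->retrans_out ? tp->retrans_out : -1;
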
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-		tp->undo_retrans += tcp_skb_pcount(skb);
-
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
 		 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	} else if (err != -EBUSY) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
+
+	if (tp->undo_retrans < 0)
+		tp->undo_retrans = 0;
+	tp->undo_retrans += tcp_skb_pcount(skb);
 	return err;
 }