Commit 7026b912, authored by Yuchung Cheng, committed by David S. Miller

tcp: fix undo on partial ack in recovery

Upon detecting spurious fast retransmit via timestamps during recovery,
use PRR to clock out new data packets instead of retransmissions. Once
all retransmissions are proven spurious, the sender then reverts the
cwnd reduction and congestion state to open or disorder.

The current code does the opposite: it undoes cwnd as soon as any
retransmission is spurious and continues to retransmit until all
data are acked. This nullifies the point of undoing the cwnd, because
the sender is still retransmitting spuriously. This patch fixes
it. The undo_ssthresh argument of tcp_undo_cwnd_reduction() is no
longer needed and is removed.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 6a63df46
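The policy in the message above can be summarized in a few lines. Below is a minimal standalone sketch of that decision rule, not kernel code: the struct and helper names are hypothetical, and in the kernel the same roles are played by tcp_sock fields (undo_marker, retrans_out, prior_ssthresh) and by tcp_undo_cwnd_reduction()/tcp_cwnd_reduction().

#include <stdbool.h>

/* Hypothetical, simplified per-connection state. */
struct conn {
        unsigned int cwnd;           /* current congestion window */
        unsigned int ssthresh;
        unsigned int prior_cwnd;     /* values saved when recovery started */
        unsigned int prior_ssthresh;
        unsigned int retrans_out;    /* retransmitted segments still in flight */
        bool undo_marker;            /* recovery may still prove spurious */
};

/* Handle a partial ACK whose timestamp shows the original transmission,
 * rather than a retransmit, filled the hole. Returns true if the ACK
 * was consumed by the undo machinery.
 */
static bool spurious_partial_ack(struct conn *c)
{
        if (!c->undo_marker)
                return false;

        if (c->retrans_out) {
                /* Retransmissions still unacknowledged: do not undo yet.
                 * Keep the reduced window and let PRR clock out new data.
                 */
                return true;
        }

        /* Every retransmission has been proven spurious: revert the
         * cwnd reduction and leave the recovery state.
         */
        c->cwnd = c->prior_cwnd;
        c->ssthresh = c->prior_ssthresh;
        c->undo_marker = false;
        return true;
}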
@@ -2243,8 +2243,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
-                                    bool unmark_loss)
+static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2268,7 +2267,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
                 else
                         tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-                if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
+                if (tp->prior_ssthresh > tp->snd_ssthresh) {
                         tp->snd_ssthresh = tp->prior_ssthresh;
                         TCP_ECN_withdraw_cwr(tp);
                 }
@@ -2276,9 +2275,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
                 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
         }
         tp->snd_cwnd_stamp = tcp_time_stamp;
-
-        if (undo_ssthresh)
-                tp->undo_marker = 0;
+        tp->undo_marker = 0;
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2298,7 +2295,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                  * or our original transmission succeeded.
                  */
                 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-                tcp_undo_cwnd_reduction(sk, true, false);
+                tcp_undo_cwnd_reduction(sk, false);
                 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                         mib_idx = LINUX_MIB_TCPLOSSUNDO;
                 else
@@ -2324,7 +2321,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
         if (tp->undo_marker && !tp->undo_retrans) {
                 DBGUNDO(sk, "D-SACK");
-                tcp_undo_cwnd_reduction(sk, true, false);
+                tcp_undo_cwnd_reduction(sk, false);
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
         }
 }
@@ -2364,7 +2361,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
         struct tcp_sock *tp = tcp_sk(sk);
 
         if (frto_undo || tcp_may_undo(tp)) {
-                tcp_undo_cwnd_reduction(sk, true, true);
+                tcp_undo_cwnd_reduction(sk, true);
 
                 DBGUNDO(sk, "partial loss");
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
@@ -2644,32 +2641,37 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 }
 
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, int acked)
+static bool tcp_try_undo_partial(struct sock *sk, const int acked,
+                                 const int prior_unsacked)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        /* Partial ACK arrived. Force Hoe's retransmit. */
-        bool failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
 
-        if (tcp_may_undo(tp)) {
+        if (tp->undo_marker && tcp_packet_delayed(tp)) {
                 /* Plain luck! Hole if filled with delayed
                  * packet, rather than with a retransmit.
                  */
+                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
+
+                /* We are getting evidence that the reordering degree is higher
+                 * than we realized. If there are no retransmits out then we
+                 * can undo. Otherwise we clock out new packets but do not
+                 * mark more packets lost or retransmit more.
+                 */
+                if (tp->retrans_out) {
+                        tcp_cwnd_reduction(sk, prior_unsacked, 0);
+                        return true;
+                }
+
                 if (!tcp_any_retrans_done(sk))
                         tp->retrans_stamp = 0;
 
-                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
-
-                DBGUNDO(sk, "Hoe");
-                tcp_undo_cwnd_reduction(sk, false, false);
+                DBGUNDO(sk, "partial recovery");
+                tcp_undo_cwnd_reduction(sk, true);
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
-
-                /* So... Do not make Hoe's retransmit yet.
-                 * If the first packet was delayed, the rest
-                 * ones are most probably delayed as well.
-                 */
-                failed = false;
+                tcp_try_keep_open(sk);
+                return true;
         }
-        return failed;
+        return false;
 }
 
 /* Process an event, which can update packets-in-flight not trivially.
@@ -2742,8 +2744,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
                         if (tcp_is_reno(tp) && is_dupack)
                                 tcp_add_reno_sack(sk);
-                } else
-                        do_lost = tcp_try_undo_partial(sk, acked);
+                } else {
+                        if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+                                return;
+                        /* Partial ACK arrived. Force fast retransmit. */
+                        do_lost = tcp_is_reno(tp) ||
+                                  tcp_fackets_out(tp) > tp->reordering;
+                }
                 break;
         case TCP_CA_Loss:
                 tcp_process_loss(sk, flag, is_dupack);
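For reference, the "clock out new data" step that the message relies on comes from Proportional Rate Reduction (RFC 6937), which tcp_cwnd_reduction() implements in the kernel. The following is a sketch of the PRR send quota using the RFC's variable names, not the kernel's; it assumes segment-counted fields and omits the fast-retransmit special case.

#include <stdint.h>

/* PRR bookkeeping accumulated since recovery started. */
struct prr {
        int ssthresh;       /* target window after the reduction */
        int recover_fs;     /* flight size at recovery start (RecoverFS) */
        int prr_delivered;  /* segments delivered to the receiver so far */
        int prr_out;        /* segments we have sent so far */
};

/* How much the sender may transmit on this ACK; new cwnd = pipe + sndcnt. */
static int prr_sndcnt(const struct prr *p, int pipe, int delivered_now)
{
        int sndcnt;

        if (pipe > p->ssthresh) {
                /* Proportional phase: spread the reduction over the whole
                 * recovery, ceil(prr_delivered * ssthresh / RecoverFS).
                 */
                int64_t n = (int64_t)p->prr_delivered * p->ssthresh
                            + p->recover_fs - 1;
                sndcnt = (int)(n / p->recover_fs) - p->prr_out;
        } else {
                /* Slow-start phase: grow back toward ssthresh, but no
                 * faster than what the ACK stream just delivered.
                 */
                int limit = p->prr_delivered - p->prr_out;
                if (limit < delivered_now)
                        limit = delivered_now;
                sndcnt = p->ssthresh - pipe;
                if (sndcnt > limit + 1)
                        sndcnt = limit + 1;
        }
        return sndcnt > 0 ? sndcnt : 0;
}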