/* tcp_recovery.c */
#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

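/* Mark an skb lost; if it was a retransmission that is now lost again,
 * drop it from retrans_out and bump the TCPLostRetransmit counter.
 */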
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

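/* Returns true if the packet stamped (t1, seq1) was sent after the packet
 * stamped (t2, seq2), using the sequence numbers to break timestamp ties.
 */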
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is it applies to both original and retransmitted
 * packet and therefore is robust against tail losses. Another advantage
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
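/* Illustrative walk-through (hypothetical numbers, not from the draft):
 * P1 is sent at t = 0 ms and P2 at t = 10 ms; P2 is SACKed at t = 50 ms,
 * so rack.mstamp points at P2 and rack.rtt_us is roughly 40 ms. P1 was
 * sent before P2, so once 40 ms plus the reordering window have elapsed
 * since P1 left, tcp_rack_detect_loss() below marks it lost without
 * waiting for three duplicate ACKs.
 */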
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 usec). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
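	/* E.g. a 20 ms min_rtt yields a 5 ms window; a min_rtt below 4 ms
	 * keeps the 1000 usec floor.
	 */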

	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
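			/* Hypothetical numbers: with rack.rtt_us = 40 ms and
			 * reo_wnd = 5 ms, an skb sent 50 ms ago has
			 * remaining < 0 and is marked lost right away, while
			 * one sent 42 ms ago leaves ~3 ms on the reo timer.
			 */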
			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
							 skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				list_del_init(&skb->tcp_tsorted_anchor);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
		} else {
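			/* tsorted_sent_queue is ordered by transmit time,
			 * so everything from here on was sent after the
			 * most recently (s)acked packet and cannot be
			 * judged lost yet.
			 */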
			break;
		}
	}
}

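/* Re-run RACK loss detection if the (s)acked information has advanced,
 * and arm the reordering timer when some packets are still within the
 * reordering window.
 */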
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
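	/* Some packets are still within the reordering window; arm the
	 * reordering timer so tcp_rack_reo_timeout() re-checks them when
	 * the window expires.
	 */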
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp &&
	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
		 */
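		/* Hypothetical example: with min_rtt = 50 ms, a (s)ack seen
		 * 10 ms after the retransmission almost certainly covers the
		 * original transmission, so the timestamp is not advanced.
		 */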
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
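	/* If RACK marked new losses, packets_in_flight drops: enter
	 * recovery (if not already in it) and retransmit the lost packets.
	 */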
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}