Commit 3cfe3baa authored by Ilpo Järvinen, committed by David S. Miller

[TCP]: Add two new spurious RTO responses to FRTO

New sysctl tcp_frto_response is added to select amongst these
responses:
	- Rate halving based; reuses CA_CWR state (default)
	- Very conservative; used to be the only one available (=1)
	- Undo cwr; undoes ssthresh and cwnd reductions (=2)

The response with rate halving requires a new parameter to
tcp_enter_cwr because FRTO has already reduced ssthresh and
doing a second reduction there has to be prevented. In addition,
to keep things nice on 80 cols screen, a local variable was
added.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent c5e7af0d
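For readers who want to try the new knob, here is a minimal user-space sketch (not part of the patch) for selecting a response at runtime. It assumes the ipv4_table entry below surfaces the sysctl as /proc/sys/net/ipv4/tcp_frto_response, and the helper name set_frto_response is made up for illustration; the values follow the mapping in the commit message (0 = rate halving, 1 = very conservative, 2 = undo cwr).

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: write the chosen F-RTO spurious RTO response mode
 * to the new sysctl. 0 = rate halving (default), 1 = very conservative,
 * 2 = undo cwr.
 */
static int set_frto_response(int mode)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_frto_response", "w");

	if (!f)
		return -1;	/* kernel without this patch, or not root */
	fprintf(f, "%d\n", mode);
	return fclose(f);
}

int main(void)
{
	/* Example: fall back to the pre-patch, very conservative response. */
	if (set_frto_response(1) != 0) {
		perror("tcp_frto_response");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}

The same effect should be achievable with "sysctl -w net.ipv4.tcp_frto_response=1" once the patch is applied.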
@@ -439,6 +439,7 @@ enum
 	NET_TCP_AVAIL_CONG_CONTROL=122,
 	NET_TCP_ALLOWED_CONG_CONTROL=123,
 	NET_TCP_MAX_SSTHRESH=124,
+	NET_TCP_FRTO_RESPONSE=125,
 };
 
 enum {
......
@@ -220,6 +220,7 @@ extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
+extern int sysctl_tcp_frto_response;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
@@ -738,7 +739,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_enter_cwr(struct sock *sk);
+extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
......
@@ -646,6 +646,14 @@ ctl_table ipv4_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
+	{
+		.ctl_name	= NET_TCP_FRTO_RESPONSE,
+		.procname	= "tcp_frto_response",
+		.data		= &sysctl_tcp_frto_response,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
 	{
 		.ctl_name	= NET_TCP_LOW_LATENCY,
 		.procname	= "tcp_low_latency",
......
@@ -86,6 +86,7 @@ int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly;
+int sysctl_tcp_frto_response __read_mostly;
 int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
@@ -762,15 +763,17 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 }
 
 /* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk)
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 
 	tp->prior_ssthresh = 0;
 	tp->bytes_acked = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
-		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+		if (set_ssthresh)
+			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tp->snd_cwnd = min(tp->snd_cwnd,
 				   tcp_packets_in_flight(tp) + 1U);
 		tp->snd_cwnd_cnt = 0;
@@ -2003,7 +2006,7 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
 		tp->retrans_stamp = 0;
 
 	if (flag&FLAG_ECE)
-		tcp_enter_cwr(sk);
+		tcp_enter_cwr(sk, 1);
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		int state = TCP_CA_Open;
@@ -2579,6 +2582,21 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 	tcp_moderate_cwnd(tp);
 }
 
+/* A conservative spurious RTO response algorithm: reduce cwnd using
+ * rate halving and continue in congestion avoidance.
+ */
+static void tcp_ratehalving_spur_to_response(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	tcp_enter_cwr(sk, 0);
+	tp->high_seq = tp->frto_highmark;	/* Smoother w/o this? - ij */
+}
+
+static void tcp_undo_spur_to_response(struct sock *sk)
+{
+	tcp_undo_cwr(sk, 1);
+}
+
 /* F-RTO spurious RTO detection algorithm (RFC4138)
  *
  * F-RTO affects during two new ACKs following RTO (well, almost, see inline
@@ -2661,7 +2679,17 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 		tp->frto_counter = 2;
 		return 1;
 	} else /* frto_counter == 2 */ {
-		tcp_conservative_spur_to_response(tp);
+		switch (sysctl_tcp_frto_response) {
+		case 2:
+			tcp_undo_spur_to_response(sk);
+			break;
+		case 1:
+			tcp_conservative_spur_to_response(tp);
+			break;
+		default:
+			tcp_ratehalving_spur_to_response(sk);
+			break;
+		};
 		tp->frto_counter = 0;
 	}
 	return 0;
......
@@ -545,7 +545,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (likely(err <= 0))
 		return err;
 
-	tcp_enter_cwr(sk);
+	tcp_enter_cwr(sk, 1);
 
 	return net_xmit_eval(err);
......