diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9a3ce379b994455a51fcd650ef95e9f1d823ac4a..2b5372ef2e0eb8d48a90f005abcff582a287662d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1877,6 +1877,7 @@ void tcp_v4_init(void);
 void tcp_init(void);
 
 /* tcp_recovery.c */
+void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
 extern void tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 076206873e3e15c43f583819bf0802493d6a2162..6fb0a28977a07d5b06678ef8e85ec2a6ed544304 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1929,7 +1929,6 @@ void tcp_enter_loss(struct sock *sk)
 	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
 	bool is_reneg;			/* is receiver reneging on SACKs? */
-	bool mark_lost;
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1945,9 +1944,6 @@ void tcp_enter_loss(struct sock *sk)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
-	tp->retrans_out = 0;
-	tp->lost_out = 0;
-
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
@@ -1959,21 +1955,13 @@ void tcp_enter_loss(struct sock *sk)
 		/* Mark SACK reneging until we recover from this loss event. */
 		tp->is_sack_reneg = 1;
 	}
-	tcp_clear_all_retrans_hints(tp);
-
 	skb_rbtree_walk_from(skb) {
-		mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
-			     is_reneg);
-		if (mark_lost)
-			tcp_sum_lost(tp, skb);
-		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
-		if (mark_lost) {
+		if (is_reneg)
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-			tp->lost_out += tcp_skb_pcount(skb);
-		}
+		tcp_mark_skb_lost(sk, skb);
 	}
 	tcp_verify_left_out(tp);
+	tcp_clear_all_retrans_hints(tp);
 
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 299b0e38aa9adec75e5346fc34389e51808cc617..b2f9be388bf3372bee4ae9eeb9679840f17cea3d 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -2,7 +2,7 @@
 #include <linux/tcp.h>
 #include <net/tcp.h>
 
-static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
+void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -95,7 +95,7 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 		remaining = tp->rack.rtt_us + reo_wnd -
 			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
 		if (remaining <= 0) {
-			tcp_rack_mark_skb_lost(sk, skb);
+			tcp_mark_skb_lost(sk, skb);
 			list_del_init(&skb->tcp_tsorted_anchor);
 		} else {
 			/* Record maximum wait time */