/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Criteria are still not confirmed experimentally and may change.
 * We kill the socket, if:
 * 1. If number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. If we have strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for a long time, or did not transmit
	 * anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
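/* A note on the shift logic in tcp_out_of_resources() above (a sketch of
 * the intent, based on how tcp_check_oom() consumes the value in kernels
 * of this vintage): each increment of shift roughly doubles the socket's
 * weight in the orphan test, "orphans << shift > sysctl_tcp_max_orphans",
 * so a long-silent peer and dubious ICMP errors each make the orphan more
 * likely to be judged out of resources.
 */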

/* Calculate maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
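/* A quick check of the "8 retries" comment above (a sketch, assuming the
 * usual TCP_RTO_MIN of 200ms): with exponential doubling, the retries span
 * about 200ms * (1 + 2 + 4 + ... + 256) = 200ms * 511 ~= 102s, i.e. just
 * over the 100 seconds quoted.
 */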

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
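/* Worked example for the halving step above (hypothetical numbers, not
 * from any trace): if icsk_mtup.search_low currently corresponds to an
 * MSS of 1024, the new floor becomes min(sysctl_tcp_base_mss, 512), but
 * never less than 68 - tcp_header_len, and is converted back to an MTU
 * via tcp_mss_to_mtu() before tcp_sync_mss() re-clamps the active MSS.
 */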

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
 * the syn_set flag is set.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
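/* Worked example of the formula above (a sketch assuming TCP_RTO_MIN of
 * 200ms and TCP_RTO_MAX of 120s): linear_backoff_thresh = ilog2(600) = 9.
 * For boundary = 15, the common tcp_retries2 default:
 *   timeout = ((2 << 9) - 1) * 200ms + (15 - 9) * 120s
 *           = 204.6s + 720s = 924.6s, a bit over 15 minutes.
 */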

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS_BH(sock_net(sk),
						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts with
			 * few or zero bytes acked after Fast Open.
			 */
			if (tp->syn_data_acked &&
			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
					NET_INC_STATS_BH(sock_net(sk),
							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
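/* Shape of tcp_write_timeout() above, summarised (an editorial note, not
 * from the original sources): SYN and SYN-ACK retransmissions are bounded
 * by the per-socket or sysctl SYN retry limits; for established flows,
 * crossing tcp_retries1 only triggers MTU black-hole probing and route
 * re-validation, while crossing tcp_retries2, the orphan limits, or a
 * user-set TCP_USER_TIMEOUT is what actually kills the connection.
 */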

void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
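/* ATO dynamics above, by example (illustrative numbers only): outside
 * pingpong mode each missed delayed ACK doubles ato, e.g. 40ms -> 80ms ->
 * 160ms, capped at the current RTO; in pingpong (interactive) mode a miss
 * instead leaves pingpong mode and resets ato to TCP_ATO_MIN (nominally
 * 40ms).
 */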

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
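/* Example of the orphan handling above (a sketch with made-up numbers):
 * an orphaned socket whose backed-off RTO has already been clamped to
 * TCP_RTO_MAX counts as "not alive"; with the resulting tcp_orphan_retries()
 * heuristic of 8, it is aborted once icsk_backoff reaches 8 rather than
 * probing a closed window forever.
 */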

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
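/* Retransmission spacing implied above (assuming the usual 1s
 * TCP_TIMEOUT_INIT): since num_timeout is incremented before the timer is
 * re-armed, the SYN-ACK is re-sent after 2s, then 4s, 8s, ..., with the
 * backoff clamped at TCP_RTO_MAX.
 */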

/*
 *	The TCP retransmit timer.
 */

void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
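/* Backoff shapes produced above, by example (a sketch assuming a current
 * RTO of 200ms): a thin stream within TCP_THIN_LINEAR_RETRIES retransmits
 * fires at 200ms, 200ms, 200ms, ...; the normal path doubles instead:
 * 200ms, 400ms, 800ms, ..., clamped at TCP_RTO_MAX (120s).
 */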

void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}


static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
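/* End-to-end keepalive example (a sketch using the stock defaults of
 * tcp_keepalive_time = 7200s, tcp_keepalive_intvl = 75s and
 * tcp_keepalive_probes = 9, all overridable per socket): an idle
 * connection sees its first probe after two hours, further probes every
 * 75 seconds, and a reset after 9 unanswered probes, i.e. roughly 2h11m
 * of total silence.
 */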

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}