// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

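/* Install a randomly generated primary key for this netns, unless a key
 * context has already been published.
 */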
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL, sizeof(key));
}

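/* RCU callback: release the per-key cipher handles and the context itself. */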
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);
	int i;

	/* We own ctx, thus no need to hold the Fastopen-lock */
	for (i = 0; i < TCP_FASTOPEN_KEY_MAX; i++) {
		if (ctx->tfm[i])
			crypto_free_cipher(ctx->tfm[i]);
	}
	kfree(ctx);
}

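/* Free a listener socket's private Fast Open context, if one was installed,
 * after an RCU grace period.
 */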
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

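/* Drop the per-netns Fast Open context (e.g. on netns teardown). */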
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
				lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

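/* Allocate a context with one AES cipher handle per supplied key; the backup
 * key is optional. Returns an ERR_PTR() on failure.
 */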
struct tcp_fastopen_context *tcp_fastopen_alloc_ctx(void *primary_key,
						    void *backup_key,
						    unsigned int len)
{
	struct tcp_fastopen_context *new_ctx;
	void *key = primary_key;
	int err, i;

	new_ctx = kmalloc(sizeof(*new_ctx), GFP_KERNEL);
	if (!new_ctx)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < TCP_FASTOPEN_KEY_MAX; i++)
		new_ctx->tfm[i] = NULL;
	for (i = 0; i < (backup_key ? 2 : 1); i++) {
		new_ctx->tfm[i] = crypto_alloc_cipher("aes", 0, 0);
		if (IS_ERR(new_ctx->tfm[i])) {
			err = PTR_ERR(new_ctx->tfm[i]);
			new_ctx->tfm[i] = NULL;
			pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
			goto out;
		}
		err = crypto_cipher_setkey(new_ctx->tfm[i], key, len);
		if (err) {
			pr_err("TCP: TFO cipher key error: %d\n", err);
			goto out;
		}
		memcpy(&new_ctx->key[i * TCP_FASTOPEN_KEY_LENGTH], key, len);
		key = backup_key;
	}
	return new_ctx;
out:
	tcp_fastopen_ctx_free(&new_ctx->rcu);
	return ERR_PTR(err);
}

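/* Install a new key pair, either on a listener socket (sk != NULL) or on the
 * netns, and free any previous context after an RCU grace period.
 */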
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key,
			      unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = tcp_fastopen_alloc_ctx(primary_key, backup_key, len);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}

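/* Compute the cookie for one key/cipher instance. Returns false for address
 * families it cannot handle.
 */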
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     struct crypto_cipher *tfm,
					     struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);
		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };

		crypto_cipher_encrypt_one(tfm, foc->val, (void *)path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;
		struct in6_addr *buf;
		int i;

		crypto_cipher_encrypt_one(tfm, tmp.val,
					  (void *)&ip6h->saddr);
		buf = &tmp.addr;
		for (i = 0; i < 4; i++)
			buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
		crypto_cipher_encrypt_one(tfm, foc->val, (void *)buf);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, ctx->tfm[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, ctx->tfm[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

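/* Create the full child socket directly from the SYN so that the data it
 * carries can be accepted before the 3WHS completes.
 */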
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() sends the SYNACK and queues the child
	 * into the listener's accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
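
/* For reference (userspace sketch, not part of this file): a server opts in
 * by sizing the TFO request queue checked above before listen(), in addition
 * to TFO_SERVER_ENABLE being set in the net.ipv4.tcp_fastopen sysctl:
 *
 *	int qlen = 128;		// max pending TFO requests (fastopenq->max_qlen)
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, backlog);
 */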

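/* True if this connection may skip the cookie exchange entirely: allowed by
 * the sysctl flag, the socket's fastopen_no_cookie option, or a route metric.
 */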
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we should perform Fast Open on the SYN. The
 * cookie (foc) may be updated and returned to the client in the SYN-ACK
 * later, e.g. for a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
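
/* For reference (userspace sketch, not part of this file): a client enables
 * the deferred-connect path handled above with
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, buf, len, 0);
 *
 * With a cached cookie the SYN is deferred and the first send() carries it
 * together with the data; without one, connect() sends a regular SYN whose
 * Fast Open option requests a cookie for later connections.
 */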

/*
 * The following code block deals with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
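	/* e.g. a third consecutive disable event pauses active TFO for
	 * 4 * tcp_fastopen_blackhole_timeout seconds
	 */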
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also reset tfo_active_disable_times if data is received
 * successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}